diff --git a/.buildkite/pipelines/ecs-dynamic-template-tests.yml b/.buildkite/pipelines/ecs-dynamic-template-tests.yml index a8145c61a2d40..1c6c18983b082 100644 --- a/.buildkite/pipelines/ecs-dynamic-template-tests.yml +++ b/.buildkite/pipelines/ecs-dynamic-template-tests.yml @@ -10,5 +10,7 @@ steps: notify: - slack: "#es-delivery" if: build.state == "failed" + - slack: "#es-data-management" + if: build.state == "failed" - email: "logs-plus@elastic.co" if: build.state == "failed" diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml index 32b0a12f06a0e..66b989d94455c 100644 --- a/.buildkite/pipelines/intake.template.yml +++ b/.buildkite/pipelines/intake.template.yml @@ -32,6 +32,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part4 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index fd0684d666d64..49c2d34df7e31 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -33,6 +33,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part4 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" @@ -40,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.15", "8.11.1", "8.12.0"] + BWC_VERSION: ["7.17.16", "8.11.2", "8.12.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 3043872845779..fab90c8ed6d17 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1073,6 +1073,22 @@ steps: env: BWC_VERSION: 7.17.15 + - label: "{{matrix.image}} / 7.17.16 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.16 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.16 + - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0 timeout_in_minutes: 300 @@ -1713,6 +1729,22 @@ steps: env: BWC_VERSION: 8.11.1 + - label: "{{matrix.image}} / 8.11.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + 
buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.2 + - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic.bwc.template.yml b/.buildkite/pipelines/periodic.bwc.template.yml index 8a8c43d75e3ef..34e9aa656e340 100644 --- a/.buildkite/pipelines/periodic.bwc.template.yml +++ b/.buildkite/pipelines/periodic.bwc.template.yml @@ -4,7 +4,7 @@ agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: $BWC_VERSION \ No newline at end of file diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index e1ea27c2468e3..88738c88ef5a0 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -8,7 +8,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.0.0 @@ -18,7 +18,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.0.1 @@ -28,7 +28,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.1.0 @@ -38,7 +38,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.1.1 @@ -48,7 +48,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.2.0 @@ -58,7 +58,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.2.1 @@ -68,7 +68,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.3.0 @@ -78,7 +78,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.3.1 @@ -88,7 +88,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.3.2 @@ -98,7 +98,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.4.0 @@ -108,7 +108,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.4.1 @@ -118,7 +118,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.4.2 @@ -128,7 +128,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.5.0 @@ -138,7 
+138,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.5.1 @@ -148,7 +148,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.5.2 @@ -158,7 +158,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.6.0 @@ -168,7 +168,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.6.1 @@ -178,7 +178,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.6.2 @@ -188,7 +188,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.7.0 @@ -198,7 +198,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.7.1 @@ -208,7 +208,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.8.0 @@ -218,7 +218,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.8.1 @@ -228,7 +228,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.9.0 @@ -238,7 +238,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.9.1 @@ -248,7 +248,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.9.2 @@ -258,7 +258,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.9.3 @@ -268,7 +268,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.10.0 @@ -278,7 +278,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.10.1 @@ -288,7 +288,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.10.2 @@ -298,7 +298,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.11.0 @@ -308,7 +308,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + 
machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.11.1 @@ -318,7 +318,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.11.2 @@ -328,7 +328,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.12.0 @@ -338,7 +338,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.12.1 @@ -348,7 +348,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.0 @@ -358,7 +358,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.1 @@ -368,7 +368,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.2 @@ -378,7 +378,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.3 @@ -388,7 +388,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.13.4 @@ -398,7 +398,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.14.0 @@ -408,7 +408,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.14.1 @@ -418,7 +418,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.14.2 @@ -428,7 +428,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.15.0 @@ -438,7 +438,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.15.1 @@ -448,7 +448,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.15.2 @@ -458,7 +458,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.16.0 @@ -468,7 +468,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.16.1 @@ -478,7 +478,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.16.2 @@ -488,7 +488,7 @@ 
steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.16.3 @@ -498,7 +498,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.0 @@ -508,7 +508,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.1 @@ -518,7 +518,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.2 @@ -528,7 +528,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.3 @@ -538,7 +538,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.4 @@ -548,7 +548,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.5 @@ -558,7 +558,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.6 @@ -568,7 +568,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.7 @@ -578,7 +578,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.8 @@ -588,7 +588,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.9 @@ -598,7 +598,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.10 @@ -608,7 +608,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.11 @@ -618,7 +618,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.12 @@ -628,7 +628,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.13 @@ -638,7 +638,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.14 @@ -648,17 +648,27 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.15 + - label: 7.17.16 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.16#bwcTest + 
timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.16 - label: 8.0.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest timeout_in_minutes: 300 agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.0.0 @@ -668,7 +678,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.0.1 @@ -678,7 +688,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.1.0 @@ -688,7 +698,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.1.1 @@ -698,7 +708,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.1.2 @@ -708,7 +718,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.1.3 @@ -718,7 +728,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.2.0 @@ -728,7 +738,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.2.1 @@ -738,7 +748,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.2.2 @@ -748,7 +758,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.2.3 @@ -758,7 +768,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.3.0 @@ -768,7 +778,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.3.1 @@ -778,7 +788,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.3.2 @@ -788,7 +798,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.3.3 @@ -798,7 +808,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.4.0 @@ -808,7 +818,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.4.1 @@ -818,7 +828,7 @@ steps: agents: provider: 
gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.4.2 @@ -828,7 +838,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.4.3 @@ -838,7 +848,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.5.0 @@ -848,7 +858,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.5.1 @@ -858,7 +868,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.5.2 @@ -868,7 +878,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.5.3 @@ -878,7 +888,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.6.0 @@ -888,7 +898,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.6.1 @@ -898,7 +908,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.6.2 @@ -908,7 +918,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.7.0 @@ -918,7 +928,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.7.1 @@ -928,7 +938,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.8.0 @@ -938,7 +948,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.8.1 @@ -948,7 +958,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.8.2 @@ -958,7 +968,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.9.0 @@ -968,7 +978,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.9.1 @@ -978,7 +988,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.9.2 @@ -988,7 +998,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: 
/dev/shm/bk env: BWC_VERSION: 8.10.0 @@ -998,7 +1008,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.10.1 @@ -1008,7 +1018,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.10.2 @@ -1018,7 +1028,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.10.3 @@ -1028,7 +1038,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.10.4 @@ -1038,7 +1048,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.11.0 @@ -1048,17 +1058,27 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.11.1 + - label: 8.11.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.2 - label: 8.12.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest timeout_in_minutes: 300 agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.12.0 diff --git a/.buildkite/pipelines/pull-request/bwc-snapshots.yml b/.buildkite/pipelines/pull-request/bwc-snapshots.yml index 21873475056ea..5a9fc2d938ac0 100644 --- a/.buildkite/pipelines/pull-request/bwc-snapshots.yml +++ b/.buildkite/pipelines/pull-request/bwc-snapshots.yml @@ -16,5 +16,5 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk diff --git a/.buildkite/pipelines/pull-request/part-4-fips.yml b/.buildkite/pipelines/pull-request/part-4-fips.yml new file mode 100644 index 0000000000000..11a50456ca4c0 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-4-fips.yml @@ -0,0 +1,11 @@ +config: + allow-labels: "Team:Security" +steps: + - label: part-4-fips + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pipelines/pull-request/part-4-windows.yml b/.buildkite/pipelines/pull-request/part-4-windows.yml new file mode 100644 index 0000000000000..0493e8af0cf8f --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-4-windows.yml @@ -0,0 +1,14 @@ +config: + allow-labels: "test-windows" +steps: + - label: part-4-windows + command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-windows-2022 + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: + GRADLE_TASK: checkPart4 diff --git 
a/.buildkite/pipelines/pull-request/part-4.yml b/.buildkite/pipelines/pull-request/part-4.yml new file mode 100644 index 0000000000000..af11f08953d07 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-4.yml @@ -0,0 +1,11 @@ +config: + skip-target-branches: "7.17" +steps: + - label: part-4 + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart4 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index b59bdc79ad293..c4aa43c775b1e 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -11,7 +11,7 @@ "set_commit_status": false, "build_on_commit": true, "build_on_comment": true, - "trigger_comment_regex": "run\\W+elasticsearch-ci.+", + "trigger_comment_regex": "(run\\W+elasticsearch-ci.+)|(^\\s*(buildkite\\s*)?test\\s+this(\\s+please)?)", "cancel_intermediate_builds": true, "cancel_intermediate_builds_on_comment": false }, diff --git a/.buildkite/scripts/dra-workflow.sh b/.buildkite/scripts/dra-workflow.sh index 4611379009a08..ecfb8088072a0 100755 --- a/.buildkite/scripts/dra-workflow.sh +++ b/.buildkite/scripts/dra-workflow.sh @@ -13,8 +13,8 @@ fi echo --- Preparing # TODO move this to image -sudo apt-get update -y -sudo apt-get install -y libxml2-utils python3.10-venv +sudo NEEDRESTART_MODE=l apt-get update -y +sudo NEEDRESTART_MODE=l apt-get install -y libxml2-utils python3.10-venv RM_BRANCH="$BRANCH" if [[ "$BRANCH" == "main" ]]; then diff --git a/.buildkite/scripts/pull-request/pipeline.test.ts b/.buildkite/scripts/pull-request/pipeline.test.ts index e13b1e1f73278..d0634752260e4 100644 --- a/.buildkite/scripts/pull-request/pipeline.test.ts +++ b/.buildkite/scripts/pull-request/pipeline.test.ts @@ -12,21 +12,28 @@ describe("generatePipelines", () => { process.env["GITHUB_PR_TRIGGER_COMMENT"] = ""; }); - test("should generate correct pipelines with a non-docs change", () => { - const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["build.gradle", "docs/README.asciidoc"]); + // Helper for testing pipeline generations that should be the same when using the overall ci trigger comment "buildkite test this" + const testWithTriggerCheck = (directory: string, changedFiles?: string[]) => { + const pipelines = generatePipelines(directory, changedFiles); expect(pipelines).toMatchSnapshot(); + + process.env["GITHUB_PR_TRIGGER_COMMENT"] = "buildkite test this"; + const pipelinesWithTriggerComment = generatePipelines(directory, changedFiles); + expect(pipelinesWithTriggerComment).toEqual(pipelines); + }; + + test("should generate correct pipelines with a non-docs change", () => { + testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["build.gradle", "docs/README.asciidoc"]); }); test("should generate correct pipelines with only docs changes", () => { - const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["docs/README.asciidoc"]); - expect(pipelines).toMatchSnapshot(); + testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["docs/README.asciidoc"]); }); test("should generate correct pipelines with full BWC expansion", () => { process.env["GITHUB_PR_LABELS"] = "test-full-bwc"; - const pipelines = generatePipelines(`${import.meta.dir}/mocks/pipelines`, ["build.gradle"]); - expect(pipelines).toMatchSnapshot(); + testWithTriggerCheck(`${import.meta.dir}/mocks/pipelines`, ["build.gradle"]); }); test("should generate correct pipeline 
when using a trigger comment for it", () => { diff --git a/.buildkite/scripts/pull-request/pipeline.ts b/.buildkite/scripts/pull-request/pipeline.ts index 600e0373d9cfc..65aec47fe3cc8 100644 --- a/.buildkite/scripts/pull-request/pipeline.ts +++ b/.buildkite/scripts/pull-request/pipeline.ts @@ -144,8 +144,12 @@ export const generatePipelines = ( (pipeline) => changedFilesIncludedCheck(pipeline, changedFiles), ]; - // When triggering via comment, we ONLY want to run pipelines that match the trigger phrase, regardless of labels, etc - if (process.env["GITHUB_PR_TRIGGER_COMMENT"]) { + // When triggering via the "run elasticsearch-ci/step-name" comment, we ONLY want to run pipelines that match the trigger phrase, regardless of labels, etc + // However, if we're using the overall CI trigger "[buildkite] test this [please]", we should use the regular filters above + if ( + process.env["GITHUB_PR_TRIGGER_COMMENT"] && + !process.env["GITHUB_PR_TRIGGER_COMMENT"].match(/^\s*(buildkite\s*)?test\s+this(\s+please)?/i) + ) { filters = [triggerCommentCheck]; }
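To make the comment semantics concrete, here is a small self-contained demonstration of the new overall-CI trigger phrase. It is a sketch in Java rather than the build's TypeScript, mirroring the regex from `pipeline.ts` above and assuming the same case-insensitive, prefix-anchored matching; the sample comment strings are illustrative:

```java
import java.util.regex.Pattern;

// Mirrors the /^\s*(buildkite\s*)?test\s+this(\s+please)?/i check in pipeline.ts.
public class TriggerPhraseDemo {
    private static final Pattern OVERALL_CI_TRIGGER =
        Pattern.compile("^\\s*(buildkite\\s*)?test\\s+this(\\s+please)?", Pattern.CASE_INSENSITIVE);

    public static void main(String[] args) {
        // These comments match, so the regular label/changed-file filters stay in effect:
        System.out.println(OVERALL_CI_TRIGGER.matcher("buildkite test this please").find()); // true
        System.out.println(OVERALL_CI_TRIGGER.matcher("Test this").find());                  // true
        // This one does not match, so only pipelines whose own trigger phrase matches are run:
        System.out.println(OVERALL_CI_TRIGGER.matcher("run elasticsearch-ci/part-1").find()); // false
    }
}
```

The same phrase also has to be allowed through by the `trigger_comment_regex` in `.buildkite/pull-requests.json`, which is why that regex gains the second alternative in this change.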
diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 688d84e1c49c8..581ec2f1565b6 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -64,6 +64,7 @@ BWC_VERSION: - "7.17.13" - "7.17.14" - "7.17.15" + - "7.17.16" - "8.0.0" - "8.0.1" - "8.1.0" @@ -104,4 +105,5 @@ BWC_VERSION: - "8.10.4" - "8.11.0" - "8.11.1" + - "8.11.2" - "8.12.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index fe40ec8fd1d29..7970d655f4014 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - - "7.17.15" - - "8.11.1" + - "7.17.16" + - "8.11.2" - "8.12.0" diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml index ae2c8d2be1deb..3efd2cce181d4 100644 --- a/.idea/inspectionProfiles/Project_Default.xml +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -2,12 +2,13 @@ diff --git a/.idea/scopes/Production_minus_fixtures.xml b/.idea/scopes/Production_minus_fixtures.xml new file mode 100644 index 0000000000000..07510326481b4 --- /dev/null +++ b/.idea/scopes/Production_minus_fixtures.xml @@ -0,0 +1,3 @@ + + + diff --git a/BUILDING.md b/BUILDING.md index 814a9fb60ded8..127d422fad089 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -3,7 +3,7 @@ Building Elasticsearch with Gradle Elasticsearch is built using the [Gradle](https://gradle.org/) open source build tools. -This document provides a general guidelines for using and working on the elasticsearch build logic. +This document provides general guidelines for using and working on the Elasticsearch build logic. ## Build logic organisation @@ -11,56 +11,56 @@ The Elasticsearch project contains 3 build-related projects that are included in ### `build-conventions` -This project contains build conventions that are applied to all elasticsearch projects. +This project contains build conventions that are applied to all Elasticsearch projects. ### `build-tools` -This project contains all build logic that we publish for third party elasticsearch plugin authors. +This project contains all build logic that we publish for third-party Elasticsearch plugin authors. We provide the following plugins: -- `elasticsearch.esplugin` - A gradle plugin for building an elasticsearch plugin. +- `elasticsearch.esplugin` - A Gradle plugin for building an Elasticsearch plugin. +- `elasticsearch.testclusters` - A Gradle plugin for setting up es clusters for testing within a build. -This project is published as part of the elasticsearch release and accessible by +This project is published as part of the Elasticsearch release and accessible by `org.elasticsearch.gradle:build-tools:`. These build tools are also used by the `elasticsearch-hadoop` project maintained by elastic. ### `build-tools-internal` -This project contains all elasticsearch project specific build logic that is not meant to be shared +This project contains all Elasticsearch project specific build logic that is not meant to be shared with other internal or external projects. ## Build guidelines This is an intentionally small set of guidelines for build users and authors -to ensure we keep the build consistent. We also publish elasticsearch build logic -as `build-tools` to be usuable by thirdparty elasticsearch plugin authors. This is +to ensure we keep the build consistent. We also publish Elasticsearch build logic +as `build-tools` to be usable by third-party Elasticsearch plugin authors. This is also used by other elastic teams like `elasticsearch-hadoop`. Breaking changes should therefore be avoided and an appropriate deprecation cycle should be followed. ### Stay up to date -The elasticsearch build usually uses the latest Gradle GA release. We stay as close to the +The Elasticsearch build usually uses the latest Gradle GA release. We stay as close to the latest Gradle releases as possible. In certain cases an update is blocked by a breaking behaviour -in Gradle. We're usually in contact with the gradle team here or working on a fix +in Gradle. We're usually in contact with the Gradle team here or working on a fix in our build logic to resolve this. **The Elasticsearch build will fail if any deprecated Gradle API is used.** ### Follow Gradle best practices -Tony Robalik has compiled a good list of rules that aligns with ours when it comes to writing and maintaining elasticsearch -gradle build logic at http://autonomousapps.com/blog/rules-for-gradle-plugin-authors.html. +Tony Robalik has compiled a good list of rules that aligns with ours when it comes to writing and maintaining Elasticsearch +Gradle build logic at http://autonomousapps.com/blog/rules-for-gradle-plugin-authors.html. Our current build does not yet tick off all those rules everywhere but the ultimate goal is to follow these principles. -The reasons for following those rules besides better readability or maintenance are also the goal to support newer gradle +Besides better readability and easier maintenance, another reason for following those rules is the goal to support newer Gradle features that we will benefit from in terms of performance and reliability. E.g. [configuration-cache support](https://github.com/elastic/elasticsearch/issues/57918), [Project Isolation](https://gradle.github.io/configuration-cache/#project_isolation) or [predictive test selection](https://gradle.com/gradle-enterprise-solutions/predictive-test-selection/) ### Make a change in the build -There are a few guidelines to follow that should make your life easier to make changes to the elasticsearch build. +There are a few guidelines to follow that should make your life easier when making changes to the Elasticsearch build. Please add a member of the `es-delivery` team as a reviewer if you're making non-trivial changes to the build.
#### Adding or updating a dependency @@ -93,13 +93,13 @@ We prefer sha256 checksums as md5 and sha1 are not considered safe anymore these will have the `origin` attribute set to `Generated by Gradle`. >A manual confirmation of the Gradle generated checksums is currently not mandatory. ->If you want to add a level of verification you can manually confirm the checksum (e.g by looking it up on the website of the library) +>If you want to add a level of verification you can manually confirm the checksum (e.g. by looking it up on the website of the library) >Please replace the content of the `origin` attribute by `official site` in that case. > -#### Custom Plugin and Task implementations +#### Custom plugin and task implementations -Build logic that is used across multiple subprojects should considered to be moved into a Gradle plugin with according Gradle task implmentation. +Build logic that is used across multiple subprojects should be considered for moving into a Gradle plugin with a corresponding Gradle task implementation. Elasticsearch specific build logic is located in the `build-tools-internal` subproject including integration tests. - Gradle plugins and Tasks should be written in Java @@ -108,7 +108,7 @@ Elasticsearch specific build logic is located in the `build-tools-internal` subp #### Declaring tasks -The elasticsearch build makes use of the [task avoidance API](https://docs.gradle.org/current/userguide/task_configuration_avoidance.html) to keep the configuration time of the build low. +The Elasticsearch build makes use of the [task avoidance API](https://docs.gradle.org/current/userguide/task_configuration_avoidance.html) to keep the configuration time of the build low. When declaring tasks (in build scripts or custom plugins) this means that we want to _register_ a task like: @@ -118,18 +118,18 @@ instead of eagerly _creating_ the task: task someTask { ... } -The major difference between these two syntaxes is, that the configuration block of an registered task will only be executed when the task is actually created due to the build requires that task to run. The configuration block of an eagerly created tasks will be executed immediately. +The major difference between these two syntaxes is that the configuration block of a registered task will only be executed when the task is actually created because the build requires that task to run. The configuration block of an eagerly created task is executed immediately. -By actually doing less in the gradle configuration time as only creating tasks that are requested as part of the build and by only running the configurations for those requested tasks, using the task avoidance api contributes a major part in keeping our build fast. +By doing less at Gradle configuration time, creating only the tasks that are requested as part of the build and running only the configuration blocks of those requested tasks, the task avoidance API contributes a major part in keeping our build fast. #### Registering test clusters -When using the elasticsearch test cluster plugin we want to use (similar to the task avoidance API) a Gradle API to create domain objects lazy or only if required by the build. +When using the Elasticsearch test cluster plugin we want to use (similar to the task avoidance API) a Gradle API to create domain objects lazily, or only if required by the build. Therefore we register test clusters using the following syntax: def someClusterProvider = testClusters.register('someCluster') { ...
} -This registers a potential testCluster named `somecluster` and provides a provider instance, but doesn't create it yet nor configures it. This makes the gradle configuration phase more efficient by +This registers a potential testCluster named `someCluster` and provides a provider instance, but doesn't create or configure it yet. This makes the Gradle configuration phase more efficient by doing less. To wire this registered cluster into a `TestClusterAware` task (e.g. `RestIntegTest`) you can resolve the actual cluster from the provider instance: @@ -139,23 +139,23 @@ To wire this registered cluster into a `TestClusterAware` task (e.g. `RestIntegT nonInputProperties.systemProperty 'tests.leader_host', "${-> someClusterProvider.get().getAllHttpSocketURI().get(0)}" } -#### Adding additional integration tests +#### Adding integration tests -Additional integration tests for a certain elasticsearch modules that are specific to certain cluster configuration can be declared in a separate so called `qa` subproject of your module. +Additional integration tests for certain Elasticsearch modules that are specific to a certain cluster configuration can be declared in a separate, so-called `qa` subproject of your module. The benefits of a dedicated project for these tests are: -- `qa` projects are dedicated two specific usecases and easier to maintain +- `qa` projects are dedicated to specific use-cases and are easier to maintain - It keeps the specific test logic separated from the common test logic. - You can run those tests in parallel to other projects of the build. #### Using test fixtures -Sometimes we want to share test fixtures to setup the code under test across multiple projects. There are basically two ways doing so. +Sometimes we want to share test fixtures to set up the code under test across multiple projects. There are basically two ways of doing so. -Ideally we would use the build-in [java-test-fixtures](https://docs.gradle.org/current/userguide/java_testing.html#sec:java_test_fixtures) gradle plugin. +Ideally we would use the built-in [java-test-fixtures](https://docs.gradle.org/current/userguide/java_testing.html#sec:java_test_fixtures) Gradle plugin. This plugin relies on having a separate sourceSet for the test fixtures code. -In the elasticsearch codebase we have test fixtures and actual tests within the same sourceSet. Therefore we introduced the `elasticsearch.internal-test-artifact` plugin to provides another build artifact of your project based on the `test` sourceSet. +In the Elasticsearch codebase we have test fixtures and actual tests within the same sourceSet. Therefore we introduced the `elasticsearch.internal-test-artifact` plugin to provide another build artifact of your project based on the `test` sourceSet. This artifact can be resolved by the consumer project as shown in the example below: @@ -168,9 +168,9 @@ dependencies { ``` This test artifact mechanism makes use of the concept of [component capabilities](https://docs.gradle.org/current/userguide/component_capabilities.html) -similar to how the gradle build-in `java-test-fixtures` plugin works. +similar to how the Gradle built-in `java-test-fixtures` plugin works. -`testArtifact` is a shortcut declared in the elasticsearch build. Alternatively you can declare the dependency via +`testArtifact` is a shortcut declared in the Elasticsearch build. 
Alternatively you can declare the dependency via ``` dependencies { @@ -186,7 +186,7 @@ dependencies { To test an unreleased development version of a third party dependency you have several options. -#### How to use a maven based third party dependency via mavenlocal? +#### How to use a Maven based third-party dependency via `mavenLocal`? 1. Clone the third party repository locally 2. Run `mvn install` to install a copy into your `~/.m2/repository` folder. 3. Add a @@ -200,16 +200,15 @@ To test an unreleased development version of a third party dependency you have s } ``` 4. Update the version in your dependency declaration accordingly (likely a snapshot version) -5. Run the gradle build as needed +5. Run the Gradle build as needed -#### How to use a maven built based third party dependency with jitpack repository? +#### How to use a Maven-built third-party dependency with the JitPack repository? -https://jitpack.io is an adhoc repository that supports building maven projects transparently in the background when -resolving unreleased snapshots from a github repository. This approach also works as temporally solution +https://jitpack.io is an ad-hoc repository that supports building Maven projects transparently in the background when +resolving unreleased snapshots from a GitHub repository. This approach also works as a temporary solution and is compliant with our CI builds. 1. Add the JitPack repository to the root build file: - ``` allprojects { repositories { @@ -227,7 +226,7 @@ dependencies { As the version you could also use a short commit hash or `main-SNAPSHOT`. In addition to snapshot builds JitPack supports building Pull Requests. Simply use PR-SNAPSHOT as the version. -3. Run the gradle build as needed. Keep in mind the initial resolution might take a bit longer as this needs to be built +3. Run the Gradle build as needed. Keep in mind the initial resolution might take a bit longer as this needs to be built by JitPack in the background before we can resolve the ad-hoc built dependency. --- @@ -240,7 +239,7 @@ not want to ship unreleased libraries into our releases. #### How to use a custom third party artifact? -For third party libraries that are not built with maven (e.g. ant) or provided as a plain jar artifact we can leverage +For third-party libraries that are not built with Maven (e.g. Ant) or provided as a plain jar artifact we can leverage a flat directory repository that resolves artifacts from a flat directory on your filesystem. 1. Put the jar artifact with the format `artifactName-version.jar` into a directory named `localRepo` (you have to create this manually) 2. Declare a @@ -264,7 +263,7 @@ allprojects { implementation 'x:jmxri:1.2.1' } ``` -4. Run the gradle build as needed with `--write-verification-metadata` to ensure the gradle dependency verification does not fail on your custom dependency. +4. Run the Gradle build as needed with `--write-verification-metadata` to ensure the Gradle dependency verification does not fail on your custom dependency. --- **NOTE** As Gradle prefers to use modules whose descriptor has been created from real meta-data rather than being generated, flat directory repositories cannot be used to override artifacts with real meta-data from other repositories declared in the build. For example, if Gradle finds only `jmxri-1.2.1.jar` in a flat directory repository, but `jmxri-1.2.1.pom` in another repository that supports meta-data, it will use the second repository to provide the module.
-Therefore, it is recommended to declare a version that is not resolvable from public repositories we use (e.g. maven central) +Therefore, it is recommended to declare a version that is not resolvable from public repositories we use (e.g. Maven Central) --- diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4f9f432bca467..db8cca17a5606 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ -Contributing to elasticsearch +Contributing to Elasticsearch ============================= Elasticsearch is a free and open project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself. @@ -54,7 +54,7 @@ The process for contributing to any of the [Elastic repositories](https://github ### Fork and clone the repository You will need to fork the main Elasticsearch code or documentation repository and clone it to your local machine. See -[github help page](https://help.github.com/articles/fork-a-repo) for help. +[GitHub help page](https://help.github.com/articles/fork-a-repo) for help. Further instructions for specific projects are given below. @@ -69,7 +69,7 @@ cycle. * Lines that are not part of your change should not be edited (e.g. don't format unchanged lines, don't reorder existing imports) * Add the appropriate [license headers](#license-headers) to any new files -* For contributions involving the elasticsearch build you can find details about the build setup in the +* For contributions involving the Elasticsearch build you can find details about the build setup in the [BUILDING](BUILDING.md) file ### Submitting your changes @@ -89,7 +89,6 @@ Once your changes and tests are ready to submit for review: Update your local repository with the most recent code from the main Elasticsearch repository, and rebase your branch on top of the latest main branch. We prefer your initial changes to be squashed into a single commit. Later, if we ask you to make changes, add them as separate commits. This makes them easier to review. As a final step before merging we will either ask you to squash all commits yourself or we'll do it for you. - 4. Submit a pull request Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). In the pull request, choose a title which sums up the changes that you have made, and in the body provide more details about what your changes do. Also mention the number of the issue where discussion has taken place, e.g. "Closes #123". @@ -121,8 +120,7 @@ using the wrapper via the `gradlew` script on Unix systems or `gradlew.bat` script on Windows in the root of the repository. The examples below show the usage on Unix. -We support development in IntelliJ versions IntelliJ 2020.1 and -onwards. +We support development in [IntelliJ IDEA] versions 2020.1 and onwards. [Docker](https://docs.docker.com/install/) is required for building some Elasticsearch artifacts and executing certain test suites.
You can run Elasticsearch without building all the artifacts with: @@ -135,7 +133,7 @@ specifically these lines tell you that Elasticsearch is ready: [2020-05-29T14:50:35,167][INFO ][o.e.h.AbstractHttpServerTransport] [runTask-0] publish_address {127.0.0.1:9200}, bound_addresses {[::1]:9200}, {127.0.0.1:9200} [2020-05-29T14:50:35,169][INFO ][o.e.n.Node ] [runTask-0] started -But to be honest its typically easier to wait until the console stops scrolling +But to be honest it's typically easier to wait until the console stops scrolling and then run `curl` in another window like this: curl -u elastic:password localhost:9200 @@ -143,7 +141,7 @@ and then run `curl` in another window like this: ### Importing the project into IntelliJ IDEA -The minimum IntelliJ IDEA version required to import the Elasticsearch project is 2020.1 +The minimum IntelliJ IDEA version required to import the Elasticsearch project is 2020.1. Elasticsearch builds using Java 17. When importing into IntelliJ you will need to define an appropriate SDK. The convention is that **this SDK should be named "17"** so that the project import will detect it automatically. For more details @@ -173,7 +171,7 @@ action is required. #### Formatting -Elasticsearch code is automatically formatted with [spotless], backed by the +Elasticsearch code is automatically formatted with [Spotless], backed by the Eclipse formatter. You can do the same in IntelliJ with the [Eclipse Code Formatter] so that you can apply the correct formatting directly in your IDE. The configuration for the plugin is held in @@ -198,7 +196,7 @@ Alternative manual steps for IntelliJ. 3. Navigate to the file `build-conventions/formatterConfig.xml` 4. Click "OK" -### REST Endpoint Conventions +### REST endpoint conventions Elasticsearch typically uses singular nouns rather than plurals in URLs. For example: @@ -214,7 +212,7 @@ but not: You may find counterexamples, but new endpoints should use the singular form. -### Java Language Formatting Guidelines +### Java language formatting guidelines Java files in the Elasticsearch codebase are automatically formatted using the [Spotless Gradle] plugin. All new projects are automatically formatted, @@ -249,13 +247,13 @@ Please follow these formatting guidelines: only do this where the benefit clearly outweighs the decrease in formatting consistency. * Note that Javadoc and block comments i.e. `/* ... */` are not formatted, - but line comments i.e `// ...` are. + but line comments i.e. `// ...` are. * Negative boolean expressions must use the form `foo == false` instead of `!foo` for better readability of the code. This is enforced via Checkstyle. Conversely, you should not write e.g. `if (foo == true)`, but just `if (foo)`. -#### Editor / IDE Support +#### Editor / IDE support IntelliJ IDEs can [import](https://blog.jetbrains.com/idea/2014/01/intellij-idea-13-importing-code-formatter-settings-from-eclipse/) @@ -316,7 +314,7 @@ is to be helpful, not to turn writing code into a chore. this is critical to understanding the code e.g. documenting the subtleties of the implementation of a private method. The point here is that implementations will change over time, and the Javadoc is - less likely to become out-of-date if it only talks about the what is + less likely to become out-of-date if it only talks about the purpose of the code, not what it does. 8. Examples in Javadoc can be very useful, so feel free to add some if you can reasonably do so i.e. 
if it takes a whole page of code to set @@ -362,7 +360,7 @@ Finally, use your judgement! Base your decisions on what will help other developers - including yourself, when you come back to some code 3 months in the future, having forgotten how it works. -### License Headers +### License headers We require license headers on all Java files. With the exception of the top-level `x-pack` directory, all contributed code should have the following @@ -433,7 +431,7 @@ In rare situations you may want to configure your `Logger` slightly differently, perhaps specifying a different class or maybe using one of the methods on `org.elasticsearch.common.logging.Loggers` instead. -If the log message includes values from your code then you must use use +If the log message includes values from your code then you must use placeholders rather than constructing the string yourself using simple concatenation. Consider wrapping the values in `[...]` to help distinguish them from the static part of the message: @@ -461,18 +459,18 @@ unit tests, especially if there is complex logic for computing what is logged and when to log it. You can use a `org.elasticsearch.test.MockLogAppender` to make assertions about the logs that are being emitted. -Logging is a powerful diagnostic technique but it is not the only possibility. +Logging is a powerful diagnostic technique, but it is not the only possibility. You should also consider exposing some information about your component via an -API instead of in logs. For instance you can implement APIs to report its +API instead of in logs. For instance, you can implement APIs to report its current status, various statistics, and maybe even details of recent failures. #### Log levels -Each log message is written at a particular _level_. By default Elasticsearch +Each log message is written at a particular _level_. By default, Elasticsearch will suppress messages at the two most verbose levels, `TRACE` and `DEBUG`, and will output messages at all other levels. Users can configure which levels of message are written by each logger at runtime, but you should expect everyone -to run with the default configuration almost all of the time and choose your +to run with the default configuration almost all the time and choose your levels accordingly. The guidance in this section is subjective in some areas. When in doubt, @@ -570,7 +568,7 @@ an index template is created or updated: `INFO`-level logging is enabled by default so its target audience is the general population of users and administrators. You should use user-facing terminology and ensure that messages at this level are self-contained. In -general you shouldn't log unusual events, particularly exceptions with stack +general, you shouldn't log unusual events, particularly exceptions with stack traces, at `INFO` level. If the event is relatively benign then use `DEBUG`, whereas if the user should be notified then use `WARN`. @@ -629,7 +627,7 @@ the logs. ##### `ERROR` -This is the next least verbose level after `WARN`. In theory it is possible for +This is the next least verbose level after `WARN`. In theory, it is possible for users to suppress messages at `WARN` and below, believing this to help them focus on the most important `ERROR` messages, but in practice in Elasticsearch this will hide so much useful information that the resulting logs will be @@ -660,7 +658,7 @@ numbering scheme separate to release version. The main ones are inter-node binary protocol and index data + metadata respectively. 
Separated version numbers are comprised of an integer number. The semantic -meaing of a version number are defined within each `*Version` class. There +meaning of a version number is defined within each `*Version` class. There is no direct mapping between separated version numbers and the release version. The versions used by any particular instance of Elasticsearch can be obtained by querying `/_nodes/info` on the node. @@ -675,13 +673,29 @@ number, there are a few rules that need to be followed: once merged into `main`. 2. To create a new component version, add a new constant to the respective class with a descriptive name of the change being made. Increment the integer - number according to the partciular `*Version` class. + number according to the particular `*Version` class. If your pull request has a conflict around your new version constant, you need to update your PR from `main` and change your PR to use the next available version number. -### Creating A Distribution +### Checking for cluster features + +As part of developing a new feature or change, you might need to determine +if all nodes in a cluster have been upgraded to support your new feature. +This can be done using `FeatureService`. To define and check for a new +feature in a cluster: + +1. Define a new `NodeFeature` constant with a unique id for the feature + in a class related to the change you're making. +2. Return that constant from an instance of `FeatureSpecification.getFeatures`, + either an existing implementation or a new implementation. Make sure + the implementation is added as an SPI implementation in `module-info.java` + and `META-INF/services`. +3. To check if all nodes in the cluster support the new feature, call +`FeatureService.clusterHasFeature(ClusterState, NodeFeature)`. + +### Creating a distribution Run all build commands from within the root directory: @@ -711,7 +725,7 @@ The archive distributions (tar and zip) can be found under: ./distribution/archives/(darwin-tar|linux-tar|windows-zip|oss-darwin-tar|oss-linux-tar|oss-windows-zip)/build/distributions/ -### Running The Full Test Suite +### Running the full test suite Before submitting your changes, run the test suite to make sure that nothing is broken, with: @@ -736,14 +750,14 @@ a test that passes locally, may actually fail later due to random settings or data input. To make tests repeatable, a `REPRODUCE` line in CI will also include the `-Dtests.seed` parameter. -When running locally, gradle does its best to take advantage of cached results. +When running locally, Gradle does its best to take advantage of cached results. So, if the code is unchanged, running the same test with the same `-Dtests.seed` repeatedly may not actually run the test if it has passed with that seed in the previous execution. A way around this is to pass a separate parameter -to adjust the command options seen by gradle. +to adjust the command options seen by Gradle. A simple option may be to add the parameter `-Dtests.timestamp=$(date +%s)` which will give the current time stamp as a parameter, thus making the parameters -sent to gradle unique and bypassing the cache. +sent to Gradle unique and bypassing the cache. ### Project layout @@ -760,9 +774,9 @@ Builds our tar and zip archives and our rpm and deb packages. Libraries used to build other parts of the project. These are meant to be internal rather than general purpose. We have no plans to [semver](https://semver.org/) their APIs or accept feature requests for them.
-We publish them to maven central because they are dependencies of our plugin -test framework, high level rest client, and jdbc driver but they really aren't -general purpose enough to *belong* in maven central. We're still working out +We publish them to Maven Central because they are dependencies of our plugin +test framework, high level rest client, and jdbc driver, but they really aren't +general purpose enough to *belong* in Maven Central. We're still working out what to do here. #### `modules` @@ -773,7 +787,7 @@ they depend on libraries that we don't believe *all* of Elasticsearch should depend on. For example, reindex requires the `connect` permission so it can perform -reindex-from-remote but we don't believe that the *all* of Elasticsearch should +reindex-from-remote, but we don't believe that the *all* of Elasticsearch should have the "connect". For another example, Painless is implemented using antlr4 and asm and we don't believe that *all* of Elasticsearch should have access to them. @@ -812,7 +826,7 @@ qa project, open a PR and be ready to discuss options. #### `server` The server component of Elasticsearch that contains all of the modules and -plugins. Right now things like the high level rest client depend on the server +plugins. Right now things like the high level rest client depend on the server, but we'd like to fix that in the future. #### `test` @@ -832,7 +846,7 @@ the `qa` subdirectory functions just like the top level `qa` subdirectory. The `plugin` subdirectory contains the x-pack module which runs inside the Elasticsearch process. -### Gradle Build +### Gradle build We use Gradle to build Elasticsearch because it is flexible enough to not only build and package Elasticsearch, but also orchestrate all of the ways that we @@ -849,16 +863,20 @@ common configurations in our build and how we use them: at compile and runtime but are not exposed as a compile dependency to other dependent projects. Dependencies added to the `implementation` configuration are considered an implementation detail that can be changed at a later date without affecting any dependent projects. +
`api`
Dependencies that are used as compile and runtime dependencies of a project - and are considered part of the external api of the project. + and are considered part of the external API of the project.
+
`runtimeOnly`
Dependencies that are not on the classpath at compile time but are on the classpath at runtime. We mostly use this configuration to make sure that we do not accidentally compile against dependencies of our dependencies, also known as "transitive" dependencies.
+
`compileOnly`
Code that is on the classpath at compile time but that should not be shipped with the project because it is "provided" by the runtime somehow. Elasticsearch plugins use this configuration to include dependencies that are bundled with Elasticsearch's server.
+
`testImplementation`
Code that is on the classpath for compiling tests that are part of this project but not production code. The canonical example of this is `junit`.
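For illustration only, here is a minimal sketch of how these configurations might be combined in a module's `build.gradle`. The coordinates and versions below are invented for the example rather than taken from the real build:

```groovy
dependencies {
  // part of this project's external API: exposed to dependent projects at compile time
  api 'org.acme:acme-api:1.0.0'
  // an implementation detail: hidden from dependent projects' compile classpaths
  implementation 'org.acme:acme-core:1.0.0'
  // needed at runtime only, so we can never accidentally compile against it
  runtimeOnly 'org.acme:acme-driver:1.0.0'
  // "provided" by the runtime (for example, bundled with Elasticsearch's server), so not shipped
  compileOnly 'org.acme:acme-platform:1.0.0'
  // only on the classpath for compiling and running this project's tests
  testImplementation 'junit:junit:4.13.2'
}
```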
@@ -881,7 +899,7 @@ time is very limited. In some cases the time we would need to spend on reviews would outweigh the benefits of a change by preventing us from working on other more beneficial changes instead. -Please discuss your change in a Github issue before spending much time on its +Please discuss your change in a GitHub issue before spending much time on its implementation. We sometimes have to reject contributions that duplicate other efforts, take the wrong approach to solving a problem, or solve a problem which does not need solving. An up-front discussion often saves a good deal of wasted @@ -964,8 +982,8 @@ Finally, we require that you run `./gradlew check` before submitting a non-documentation contribution. This is mentioned above, but it is worth repeating in this section because it has come up in this context. -[intellij]: https://blog.jetbrains.com/idea/2017/07/intellij-idea-2017-2-is-here-smart-sleek-and-snappy/ +[IntelliJ IDEA]: https://www.jetbrains.com/idea/ [Checkstyle]: https://plugins.jetbrains.com/plugin/1065-checkstyle-idea -[spotless]: https://github.com/diffplug/spotless +[Spotless]: https://github.com/diffplug/spotless [Eclipse Code Formatter]: https://plugins.jetbrains.com/plugin/6546-eclipse-code-formatter [Spotless Gradle]: https://github.com/diffplug/spotless/tree/main/plugin-gradle diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 0393cf92776fa..96f94755a2758 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -45,7 +45,7 @@ run it using Gradle: ==== Launching and debugging from an IDE -If you want to run Elasticsearch from your IDE, the `./gradlew run` task +If you want to run and debug Elasticsearch from your IDE, the `./gradlew run` task supports a remote debugging option. Run the following from your terminal: --------------------------------------------------------------------------- @@ -55,7 +55,7 @@ supports a remote debugging option. Next start the "Debug Elasticsearch" run configuration in IntelliJ. This will enable the IDE to connect to the process and allow debug functionality. -As such the IDE needs to be instructed to listen for connections on this port. +As such, the IDE needs to be instructed to listen for connections on the debug port. Since we might run multiple JVMs as part of configuring and starting the cluster it's recommended to configure the IDE to initiate multiple listening attempts. In case of IntelliJ, this option is called "Auto restart" and needs to be checked. @@ -64,6 +64,22 @@ NOTE: If you have imported the project into IntelliJ according to the instructions in link:/CONTRIBUTING.md#importing-the-project-into-intellij-idea[CONTRIBUTING.md] then a debug run configuration named "Debug Elasticsearch" will be created for you and configured appropriately. +===== Debugging the CLI launcher + +The Gradle task does not start the Elasticsearch server process directly; like in the Elasticsearch distribution, +the job of starting the server process is delegated to a launcher CLI tool. If you need to debug the launcher itself, +add the following option to the `run` task: +--------------------------------------------------------------------------- +./gradlew run --debug-cli-jvm +--------------------------------------------------------------------------- +This option can be specified in isolation or combined with `--debug-jvm`.
Since the CLI launcher lifespan may overlap +with the server process lifespan, the CLI launcher process will be started on a different port (5107 for the first node, +5108 and following for additional cluster nodes). + +As with the `--debug-jvm` command, the IDE needs to be instructed to listen for connections on the debug port. +You need to configure and start an appropriate Remote JVM Debug configuration, e.g. by cloning and editing +the "Debug Elasticsearch" run configuration to point to the correct debug port. + ==== Disabling assertions When running Elasticsearch with `./gradlew run`, assertions are enabled by @@ -103,7 +119,8 @@ password: `elastic-password`. - In order to start a node with a different max heap space add: `-Dtests.heap.size=4G` - In order to use a custom data directory: `--data-dir=/tmp/foo` - In order to preserve data in between executions: `--preserve-data` -- In order to remotely attach a debugger to the process: `--debug-jvm` +- In order to remotely attach a debugger to the server process: `--debug-jvm` +- In order to remotely attach a debugger to the CLI launcher process: `--debug-cli-jvm` - In order to set a different keystore password: `--keystore-password` - In order to set an Elasticsearch setting, provide a setting with the following prefix: `-Dtests.es.` - In order to pass a JVM setting, e.g. to disable assertions: `-Dtests.jvm.argline="-da"` diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java index c6d5cd91e7ecb..09cdc8b269ad3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java @@ -159,18 +159,18 @@ public void setup() { @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void adaptive() { - MultivalueDedupe.dedupeToBlockAdaptive(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockAdaptive(block, BlockFactory.getNonBreakingInstance()).close(); } @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void copyAndSort() { - MultivalueDedupe.dedupeToBlockUsingCopyAndSort(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockUsingCopyAndSort(block, BlockFactory.getNonBreakingInstance()).close(); } @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void copyMissing() { - MultivalueDedupe.dedupeToBlockUsingCopyMissing(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockUsingCopyMissing(block, BlockFactory.getNonBreakingInstance()).close(); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java index 84f7cec47b737..d723ea3e1a6b3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java @@ -24,6 +24,7 @@ import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.topn.TopNEncoder; import org.elasticsearch.compute.operator.topn.TopNOperator; +import 
org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.openjdk.jmh.annotations.Benchmark; @@ -103,6 +104,7 @@ private static Operator operator(String data, int topCount) { default -> throw new IllegalArgumentException("unsupported data type [" + data + "]"); }; CircuitBreakerService breakerService = new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, Settings.EMPTY, List.of(), ClusterSettings.createBuiltInClusterSettings() diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java index 9fa876a00c35c..afe8377d3e58c 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java @@ -8,8 +8,11 @@ package org.elasticsearch.benchmark.compute.operator; +import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -19,6 +22,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -30,14 +34,16 @@ import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.lucene.BlockReaderFactories; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.topn.TopNOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.lookup.SearchLookup; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -56,7 +62,9 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.PrimitiveIterator; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.IntStream; @@ -93,18 +101,118 @@ public class ValuesSourceReaderBenchmark { } } - private static BlockLoader blockLoader(String name) { + private static List fields(String name) { return switch (name) { - case "long" -> numericBlockLoader(name, NumberFieldMapper.NumberType.LONG); - case "int" -> numericBlockLoader(name, NumberFieldMapper.NumberType.INTEGER); - case "double" -> numericBlockLoader(name, NumberFieldMapper.NumberType.DOUBLE); - case "keyword" -> new 
KeywordFieldMapper.KeywordFieldType(name).blockLoader(null); - default -> throw new IllegalArgumentException("can't read [" + name + "]"); + case "3_stored_keywords" -> List.of( + new ValuesSourceReaderOperator.FieldInfo("keyword_1", List.of(blockLoader("stored_keyword_1"))), + new ValuesSourceReaderOperator.FieldInfo("keyword_2", List.of(blockLoader("stored_keyword_2"))), + new ValuesSourceReaderOperator.FieldInfo("keyword_3", List.of(blockLoader("stored_keyword_3"))) + ); + default -> List.of(new ValuesSourceReaderOperator.FieldInfo(name, List.of(blockLoader(name)))); }; } - private static BlockLoader numericBlockLoader(String name, NumberFieldMapper.NumberType numberType) { - return new NumberFieldMapper.NumberFieldType(name, numberType).blockLoader(null); + enum Where { + DOC_VALUES, + SOURCE, + STORED; + } + + private static BlockLoader blockLoader(String name) { + Where where = Where.DOC_VALUES; + if (name.startsWith("stored_")) { + name = name.substring("stored_".length()); + where = Where.STORED; + } else if (name.startsWith("source_")) { + name = name.substring("source_".length()); + where = Where.SOURCE; + } + switch (name) { + case "long": + return numericBlockLoader(name, where, NumberFieldMapper.NumberType.LONG); + case "int": + return numericBlockLoader(name, where, NumberFieldMapper.NumberType.INTEGER); + case "double": + return numericBlockLoader(name, where, NumberFieldMapper.NumberType.DOUBLE); + case "keyword": + name = "keyword_1"; + } + if (name.startsWith("keyword")) { + boolean syntheticSource = false; + FieldType ft = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); + switch (where) { + case DOC_VALUES: + break; + case SOURCE: + ft.setDocValuesType(DocValuesType.NONE); + break; + case STORED: + ft.setStored(true); + ft.setDocValuesType(DocValuesType.NONE); + syntheticSource = true; + break; + } + ft.freeze(); + return new KeywordFieldMapper.KeywordFieldType( + name, + ft, + Lucene.KEYWORD_ANALYZER, + Lucene.KEYWORD_ANALYZER, + Lucene.KEYWORD_ANALYZER, + new KeywordFieldMapper.Builder(name, IndexVersion.current()).docValues(ft.docValuesType() != DocValuesType.NONE), + syntheticSource + ).blockLoader(new MappedFieldType.BlockLoaderContext() { + @Override + public String indexName() { + return "benchmark"; + } + + @Override + public SearchLookup lookup() { + throw new UnsupportedOperationException(); + } + + @Override + public Set sourcePaths(String name) { + return Set.of(name); + } + + @Override + public String parentField(String field) { + throw new UnsupportedOperationException(); + } + }); + } + throw new IllegalArgumentException("can't read [" + name + "]"); + } + + private static BlockLoader numericBlockLoader(String name, Where where, NumberFieldMapper.NumberType numberType) { + boolean stored = false; + boolean docValues = true; + switch (where) { + case DOC_VALUES: + break; + case SOURCE: + stored = true; + docValues = false; + break; + case STORED: + throw new UnsupportedOperationException(); + } + return new NumberFieldMapper.NumberFieldType( + name, + numberType, + true, + stored, + docValues, + true, + null, + Map.of(), + null, + false, + null, + null + ).blockLoader(null); } /** @@ -122,7 +230,7 @@ private static BlockLoader numericBlockLoader(String name, NumberFieldMapper.Num @Param({ "in_order", "shuffled", "shuffled_singles" }) public String layout; - @Param({ "long", "int", "double", "keyword" }) + @Param({ "long", "int", "double", "keyword", "stored_keyword", "3_stored_keywords" }) public String name; private Directory directory; @@ -134,9 
+242,11 @@ private static BlockLoader numericBlockLoader(String name, NumberFieldMapper.Num public void benchmark() { ValuesSourceReaderOperator op = new ValuesSourceReaderOperator( BlockFactory.getNonBreakingInstance(), - List.of(BlockReaderFactories.loaderToFactory(reader, blockLoader(name))), - 0, - name + fields(name), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> { + throw new UnsupportedOperationException("can't load _source here"); + })), + 0 ); long sum = 0; for (Page page : pages) { @@ -160,7 +270,7 @@ public void benchmark() { sum += (long) values.getDouble(p); } } - case "keyword" -> { + case "keyword", "stored_keyword" -> { BytesRef scratch = new BytesRef(); BytesRefVector values = op.getOutput().getBlock(1).asVector(); for (int p = 0; p < values.getPositionCount(); p++) { @@ -170,21 +280,59 @@ public void benchmark() { sum += Integer.parseInt(r.utf8ToString()); } } + case "3_stored_keywords" -> { + BytesRef scratch = new BytesRef(); + Page out = op.getOutput(); + for (BytesRefVector values : new BytesRefVector[] { + out.getBlock(1).asVector(), + out.getBlock(2).asVector(), + out.getBlock(3).asVector() }) { + + for (int p = 0; p < values.getPositionCount(); p++) { + BytesRef r = values.getBytesRef(p, scratch); + r.offset++; + r.length--; + sum += Integer.parseInt(r.utf8ToString()); + } + } + } } } - long expected; - if (name.equals("keyword")) { - expected = 0; - for (int i = 0; i < INDEX_SIZE; i++) { - expected += i % 1000; - } - } else { - expected = INDEX_SIZE; - expected = expected * (expected - 1) / 2; + long expected = 0; + switch (name) { + case "keyword", "stored_keyword": + for (int i = 0; i < INDEX_SIZE; i++) { + expected += i % 1000; + } + break; + case "3_stored_keywords": + for (int i = 0; i < INDEX_SIZE; i++) { + expected += 3 * (i % 1000); + } + break; + default: + expected = INDEX_SIZE; + expected = expected * (expected - 1) / 2; } if (expected != sum) { throw new AssertionError("[" + layout + "][" + name + "] expected [" + expected + "] but was [" + sum + "]"); } + boolean foundStoredFieldLoader = false; + ValuesSourceReaderOperator.Status status = (ValuesSourceReaderOperator.Status) op.status(); + for (Map.Entry e : status.readersBuilt().entrySet()) { + if (e.getKey().indexOf("stored_fields") >= 0) { + foundStoredFieldLoader = true; + } + } + if (name.indexOf("stored") >= 0) { + if (foundStoredFieldLoader == false) { + throw new AssertionError("expected to use a stored field loader but only had: " + status.readersBuilt()); + } + } else { + if (foundStoredFieldLoader) { + throw new AssertionError("expected not to use a stored field loader but only had: " + status.readersBuilt()); + } + } } @Setup @@ -195,15 +343,23 @@ public void setup() throws IOException { private void setupIndex() throws IOException { directory = new ByteBuffersDirectory(); + FieldType keywordFieldType = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); + keywordFieldType.setStored(true); + keywordFieldType.freeze(); try (IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) { for (int i = 0; i < INDEX_SIZE; i++) { String c = Character.toString('a' - ((i % 1000) % 26) + 26); iw.addDocument( List.of( new NumericDocValuesField("long", i), + new StoredField("long", i), new NumericDocValuesField("int", i), + new StoredField("int", i), new NumericDocValuesField("double", NumericUtils.doubleToSortableLong(i)), - new KeywordFieldMapper.KeywordField("keyword", new BytesRef(c + i % 1000), 
KeywordFieldMapper.Defaults.FIELD_TYPE) + new StoredField("double", (double) i), + new KeywordFieldMapper.KeywordField("keyword_1", new BytesRef(c + i % 1000), keywordFieldType), + new KeywordFieldMapper.KeywordField("keyword_2", new BytesRef(c + i % 1000), keywordFieldType), + new KeywordFieldMapper.KeywordField("keyword_3", new BytesRef(c + i % 1000), keywordFieldType) ) ); if (i % COMMIT_INTERVAL == 0) { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java index 5b139f800cb39..8e60a7435cbc7 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/AggConstructionContentionBenchmark.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.support.NestedScope; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -108,7 +109,12 @@ public class AggConstructionContentionBenchmark { @Setup public void setup() { breakerService = switch (breaker) { - case "real", "preallocate" -> new HierarchyCircuitBreakerService(Settings.EMPTY, List.of(), clusterSettings); + case "real", "preallocate" -> new HierarchyCircuitBreakerService( + CircuitBreakerMetrics.NOOP, + Settings.EMPTY, + List.of(), + clusterSettings + ); case "noop" -> new NoneCircuitBreakerService(); default -> throw new UnsupportedOperationException(); }; diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index c134638bcd6b6..66001e66f2486 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -35,6 +35,10 @@ gradlePlugin { id = 'elasticsearch.build' implementationClass = 'org.elasticsearch.gradle.internal.BuildPlugin' } + buildComplete { + id = 'elasticsearch.build-complete' + implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchBuildCompletePlugin' + } distro { id = 'elasticsearch.distro' implementationClass = 'org.elasticsearch.gradle.internal.distribution.ElasticsearchDistributionPlugin' @@ -158,7 +162,7 @@ gradlePlugin { stringTemplate { id = 'elasticsearch.string-templates' implementationClass = 'org.elasticsearch.gradle.internal.StringTemplatePlugin' - } + } testFixtures { id = 'elasticsearch.test.fixtures' implementationClass = 'org.elasticsearch.gradle.internal.testfixtures.TestFixturesPlugin' @@ -266,6 +270,8 @@ dependencies { api buildLibs.apache.rat api buildLibs.jna api buildLibs.shadow.plugin + api buildLibs.gradle.enterprise + // for our ide tweaking api buildLibs.idea.ext // When upgrading forbidden apis, ensure dependency version is bumped in ThirdPartyPrecommitPlugin as well @@ -280,6 +286,7 @@ dependencies { api buildLibs.asm.tree api buildLibs.httpclient api buildLibs.httpcore + compileOnly buildLibs.checkstyle runtimeOnly "org.elasticsearch.gradle:reaper:$version" testImplementation buildLibs.checkstyle diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy 
b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy index 2756b9745bc7f..21582b6823b81 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy @@ -31,7 +31,8 @@ abstract class AbstractRestResourcesFuncTest extends AbstractGradleFuncTest { } """ - subProject(":distribution:archives:integ-test-zip") << "configurations { extracted }" + subProject(":distribution:archives:integ-test-zip") << "configurations.create('extracted')\n" + subProject(":distribution:archives:integ-test-zip") << "configurations.create('default')\n" } void setupRestResources(List apis, List tests = [], List xpackTests = []) { diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy index e31594ad2e4a6..96e342e995a36 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy @@ -31,7 +31,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright @@ -39,7 +39,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. - + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. @@ -58,11 +58,11 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { id 'java' id 'elasticsearch.global-build-info' } - + apply plugin:'elasticsearch.build' group = 'org.acme' description = "some example project" - + repositories { maven { name = "local-test" @@ -73,7 +73,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { } mavenCentral() } - + dependencies { jarHell 'org.elasticsearch:elasticsearch-core:current' } @@ -89,7 +89,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { * Side Public License, v 1. 
*/ package org.elasticsearch; - + public class SampleClass { } """.stripIndent() @@ -117,7 +117,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { noticeFile.set(file("NOTICE")) """ when: - def result = gradleRunner("assemble").build() + def result = gradleRunner("assemble", "-x", "generateHistoricalFeaturesMetadata").build() then: result.task(":assemble").outcome == TaskOutcome.SUCCESS file("build/distributions/hello-world.jar").exists() @@ -146,7 +146,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { } licenseFile.set(file("LICENSE")) noticeFile.set(file("NOTICE")) - + tasks.named("forbiddenApisMain").configure {enabled = false } tasks.named('checkstyleMain').configure { enabled = false } tasks.named('loggerUsageCheck').configure { enabled = false } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy index e17f9c7537777..9d32eaadf7aec 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy @@ -29,7 +29,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.java' id 'elasticsearch.publish' } - + version = "1.0" group = 'org.acme' description = "custom project description" @@ -92,11 +92,11 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.publish' id 'com.github.johnrengelman.shadow' } - + repositories { mavenCentral() } - + dependencies { implementation 'org.slf4j:log4j-over-slf4j:1.7.30' shadow 'org.slf4j:slf4j-api:1.7.30' @@ -110,8 +110,8 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } } version = "1.0" - group = 'org.acme' - description = 'some description' + group = 'org.acme' + description = 'some description' """ when: @@ -179,7 +179,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } dependencies { - shadow project(":someLib") + shadow project(":someLib") } publishing { repositories { @@ -192,10 +192,10 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { allprojects { apply plugin: 'elasticsearch.java' version = "1.0" - group = 'org.acme' + group = 'org.acme' } - description = 'some description' + description = 'some description' """ when: @@ -263,13 +263,13 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.publish' id 'com.github.johnrengelman.shadow' } - + esplugin { name = 'hello-world-plugin' classname 'org.acme.HelloWorldPlugin' description = "custom project description" } - + publishing { repositories { maven { @@ -277,17 +277,17 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } } } - + // requires elasticsearch artifact available tasks.named('bundlePlugin').configure { enabled = false } licenseFile.set(file('license.txt')) noticeFile.set(file('notice.txt')) version = "1.0" - group = 'org.acme' + group = 'org.acme' """ when: - def result = gradleRunner('assemble', '--stacktrace').build() + def result = gradleRunner('assemble', '--stacktrace', '-x', 'generateHistoricalFeaturesMetadata').build() then: result.task(":generatePom").outcome == TaskOutcome.SUCCESS @@ -348,19 +348,19 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.internal-es-plugin' id 'elasticsearch.publish' } - + esplugin { name = 'hello-world-plugin' classname 
'org.acme.HelloWorldPlugin' description = "custom project description" } - + // requires elasticsearch artifact available tasks.named('bundlePlugin').configure { enabled = false } licenseFile.set(file('license.txt')) noticeFile.set(file('notice.txt')) version = "2.0" - group = 'org.acme' + group = 'org.acme' """ when: @@ -420,9 +420,9 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { apply plugin:'elasticsearch.publish' version = "1.0" - group = 'org.acme' + group = 'org.acme' description = "just a test project" - + ext.projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) """ diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy index 888c0cc83fc15..94fa329af1715 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy @@ -24,8 +24,6 @@ class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { def "yamlRestTest does nothing when there are no tests"() { given: - // RestIntegTestTask not cc compatible due to - configurationCacheCompatible = false buildFile << """ plugins { id 'elasticsearch.legacy-yaml-rest-test' @@ -43,8 +41,6 @@ class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { def "yamlRestTest executes and copies api and tests to correct source set"() { given: - // RestIntegTestTask not cc compatible due to - configurationCacheCompatible = false internalBuild() buildFile << """ apply plugin: 'elasticsearch.legacy-yaml-rest-test' @@ -56,9 +52,10 @@ class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { // can't actually spin up test cluster from this test tasks.withType(Test).configureEach{ enabled = false } + def clazzpath = sourceSets.yamlRestTest.runtimeClasspath tasks.register("printYamlRestTestClasspath").configure { doLast { - println sourceSets.yamlRestTest.runtimeClasspath.asPath + println clazzpath.asPath } } """ diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle deleted file mode 100644 index 1a0afe6d7d344..0000000000000 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -import org.elasticsearch.gradle.util.GradleUtils - -import java.nio.file.Files - -String buildNumber = System.getenv('BUILD_NUMBER') ?: System.getenv('BUILDKITE_BUILD_NUMBER') -String performanceTest = System.getenv('BUILD_PERFORMANCE_TEST') -Boolean isNested = System.getProperty("scan.tag.NESTED") != null - -if (buildNumber && performanceTest == null && GradleUtils.isIncludedBuild(project) == false && isNested == false) { - def uploadFilePath = "build/${buildNumber}.tar.bz2" - File uploadFile = file(uploadFilePath) - project.gradle.buildFinished { result -> - println "build complete, generating: $uploadFile" - if (uploadFile.exists()) { - project.delete(uploadFile) - } - - try { - ant.tar(destfile: uploadFile, compression: "bzip2", longfile: "gnu") { - fileset(dir: projectDir) { - Set fileSet = fileTree(projectDir) { - include("**/*.hprof") - include("**/build/test-results/**/*.xml") - include("**/build/testclusters/**") - include("**/build/testrun/*/temp/**") - include("**/build/**/hs_err_pid*.log") - exclude("**/build/testclusters/**/data/**") - exclude("**/build/testclusters/**/distro/**") - exclude("**/build/testclusters/**/repo/**") - exclude("**/build/testclusters/**/extract/**") - exclude("**/build/testclusters/**/tmp/**") - exclude("**/build/testrun/*/temp/**/data/**") - exclude("**/build/testrun/*/temp/**/distro/**") - exclude("**/build/testrun/*/temp/**/repo/**") - exclude("**/build/testrun/*/temp/**/extract/**") - exclude("**/build/testrun/*/temp/**/tmp/**") - } - .files - .findAll { Files.isRegularFile(it.toPath()) } - - if (fileSet.empty) { - // In cases where we don't match any workspace files, exclude everything - ant.exclude(name: "**/*") - } else { - fileSet.each { - ant.include(name: projectDir.toPath().relativize(it.toPath())) - } - } - } - - fileset(dir: "${gradle.gradleUserHomeDir}/daemon/${gradle.gradleVersion}", followsymlinks: false) { - include(name: "**/daemon-${ProcessHandle.current().pid()}*.log") - } - - fileset(dir: "${gradle.gradleUserHomeDir}/workers", followsymlinks: false) - - fileset(dir: "${project.projectDir}/.gradle/reaper", followsymlinks: false, erroronmissingdir: false) - } - } catch (Exception e) { - logger.lifecycle("Failed to archive additional logs", e) - } - - if (uploadFile.exists() && System.getenv("BUILDKITE") == "true") { - try { - println "Uploading buildkite artifact: ${uploadFilePath}..." - new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath) - .start() - .waitFor() - - println "Generating buildscan link for artifact..." 
- - def process = new ProcessBuilder("buildkite-agent", "artifact", "search", uploadFilePath, "--step", System.getenv('BUILDKITE_JOB_ID'), "--format", "%i").start() - process.waitFor() - def artifactUuid = (process.text ?: "").trim() - - println "Artifact UUID: ${artifactUuid}" - if (artifactUuid) { - buildScan.link 'Artifact Upload', "https://buildkite.com/organizations/elastic/pipelines/${System.getenv('BUILDKITE_PIPELINE_SLUG')}/builds/${buildNumber}/jobs/${System.getenv('BUILDKITE_JOB_ID')}/artifacts/${artifactUuid}" - } - } catch (Exception e) { - logger.lifecycle("Failed to upload buildkite artifact", e) - } - } - } -} diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index 0f56dd2ef8992..b7f3932effa96 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -132,6 +132,7 @@ buildScan { } buildFinished { result -> + buildScanPublished { scan -> // Attach build scan link as build metadata // See: https://buildkite.com/docs/pipelines/build-meta-data @@ -142,18 +143,17 @@ buildScan { // Add a build annotation // See: https://buildkite.com/docs/agent/v3/cli-annotate def body = """
${System.getenv('BUILDKITE_LABEL')} :gradle: ${result.failure ? 'failed' : 'successful'} build: gradle ${gradle.startParameter.taskNames.join(' ')}
""" - new ProcessBuilder( + def process = [ 'buildkite-agent', 'annotate', '--context', result.failure ? 'gradle-build-scans-failed' : 'gradle-build-scans', '--append', '--style', - result.failure ? 'error' : 'info', - body - ) - .start() - .waitFor() + result.failure ? 'error' : 'info' + ].execute() + process.withWriter { it.write(body) } // passing the body in as an argument has issues on Windows, so let's use stdin of the process instead + process.waitFor() } } } else { diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index 08fbc5b67e978..f691d4bd996a7 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -23,9 +23,15 @@ if (BuildParams.inFipsJvm) { File fipsSecurity = new File(fipsResourcesDir, javaSecurityFilename) File fipsPolicy = new File(fipsResourcesDir, 'fips_java.policy') File fipsTrustStore = new File(fipsResourcesDir, 'cacerts.bcfks') - def bcFips = dependencies.create('org.bouncycastle:bc-fips:1.0.2') - def bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.9') - + def bcFips = dependencies.create('org.bouncycastle:bc-fips:1.0.2.4') + def bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.17') + def manualDebug = false; //change this to manually debug bouncy castle in an IDE + if(manualDebug) { + bcFips = dependencies.create('org.bouncycastle:bc-fips-debug:1.0.2.4') + bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.17'){ + exclude group: 'org.bouncycastle', module: 'bc-fips' // to avoid jar hell + } + } pluginManager.withPlugin('java-base') { TaskProvider fipsResourcesTask = project.tasks.register('fipsResources', ExportElasticsearchBuildResourcesTask) fipsResourcesTask.configure { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java index f709600fc7979..63147040a289d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java @@ -13,6 +13,7 @@ import org.elasticsearch.gradle.internal.conventions.util.Util; import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.precommit.JarHellPrecommitPlugin; +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; @@ -36,6 +37,7 @@ public void apply(Project project) { project.getPluginManager().apply(PluginBuildPlugin.class); project.getPluginManager().apply(JarHellPrecommitPlugin.class); project.getPluginManager().apply(ElasticsearchJavaPlugin.class); + project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); // Clear default dependencies added by public PluginBuildPlugin as we add our // own project dependencies for internal builds // TODO remove once we removed default dependencies from PluginBuildPlugin @@ -72,13 +74,12 @@ public void doCall() { } }); + boolean isModule = GradleUtils.isModuleProject(project.getPath()); + boolean isXPackModule = isModule && project.getPath().startsWith(":x-pack"); + if 
(isModule == false || isXPackModule) { + addNoticeGeneration(project, extension); + } project.afterEvaluate(p -> { - boolean isModule = GradleUtils.isModuleProject(p.getPath()); - boolean isXPackModule = isModule && p.getPath().startsWith(":x-pack"); - if (isModule == false || isXPackModule) { - addNoticeGeneration(p, extension); - } - @SuppressWarnings("unchecked") NamedDomainObjectContainer testClusters = (NamedDomainObjectContainer) project .getExtensions() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java index 6849796579ad9..6c7bc6753531c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks; import org.elasticsearch.gradle.internal.snyk.SnykDependencyMonitoringGradlePlugin; +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.gradle.api.InvalidUserDataException; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -61,6 +62,7 @@ public void apply(final Project project) { project.getPluginManager().apply(ElasticsearchJavadocPlugin.class); project.getPluginManager().apply(DependenciesInfoPlugin.class); project.getPluginManager().apply(SnykDependencyMonitoringGradlePlugin.class); + project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); InternalPrecommitTasks.create(project, true); configureLicenseAndNotice(project); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java new file mode 100644 index 0000000000000..4902168d9b4ff --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
*/ + +package org.elasticsearch.gradle.internal; + +import com.gradle.scan.plugin.BuildScanExtension; + +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream; +import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.file.FileSystemOperations; +import org.gradle.api.flow.FlowAction; +import org.gradle.api.flow.FlowParameters; +import org.gradle.api.flow.FlowProviders; +import org.gradle.api.flow.FlowScope; +import org.gradle.api.internal.file.FileOperations; +import org.gradle.api.provider.ListProperty; +import org.gradle.api.provider.Property; +import org.gradle.api.tasks.Input; +import org.jetbrains.annotations.NotNull; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +public abstract class ElasticsearchBuildCompletePlugin implements Plugin<Project> { + + @Inject + protected abstract FlowScope getFlowScope(); + + @Inject + protected abstract FlowProviders getFlowProviders(); + + @Inject + protected abstract FileOperations getFileOperations(); + + @Override + public void apply(Project target) { + String buildNumber = System.getenv("BUILD_NUMBER") != null + ? System.getenv("BUILD_NUMBER") + : System.getenv("BUILDKITE_BUILD_NUMBER"); + String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST"); + if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) { + File targetFile = target.file("build/" + buildNumber + ".tar.bz2"); + File projectDir = target.getProjectDir(); + File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); + BuildScanExtension extension = target.getExtensions().getByType(BuildScanExtension.class); + File daemonsLogDir = new File(target.getGradle().getGradleUserHomeDir(), "daemon/" + target.getGradle().getGradleVersion()); + + getFlowScope().always(BuildFinishedFlowAction.class, spec -> { + spec.getParameters().getBuildScan().set(extension); + spec.getParameters().getUploadFile().set(targetFile); + spec.getParameters().getProjectDir().set(projectDir); + spec.getParameters().getFilteredFiles().addAll(getFlowProviders().getBuildWorkResult().map((result) -> { + System.out.println("Build Finished Action: Collecting archive files..."); + List<File> files = new ArrayList<>(); + files.addAll(resolveProjectLogs(projectDir)); + if (files.isEmpty() == false) { + files.addAll(resolveDaemonLogs(daemonsLogDir)); + files.addAll(getFileOperations().fileTree(gradleWorkersDir).getFiles()); + files.addAll(getFileOperations().fileTree(new File(projectDir, ".gradle/reaper/")).getFiles()); + } + return files; + })); + }); + } + } + + private List<File> resolveProjectLogs(File projectDir) { + var projectDirFiles = getFileOperations().fileTree(projectDir); + projectDirFiles.include("**/*.hprof"); + projectDirFiles.include("**/build/test-results/**/*.xml"); + projectDirFiles.include("**/build/testclusters/**"); + projectDirFiles.include("**/build/testrun/*/temp/**"); + projectDirFiles.include("**/build/**/hs_err_pid*.log"); + projectDirFiles.exclude("**/build/testclusters/**/data/**"); + projectDirFiles.exclude("**/build/testclusters/**/distro/**"); + projectDirFiles.exclude("**/build/testclusters/**/repo/**"); +
projectDirFiles.exclude("**/build/testclusters/**/extract/**"); + projectDirFiles.exclude("**/build/testclusters/**/tmp/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/data/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/distro/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/repo/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/extract/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/tmp/**"); + return projectDirFiles.getFiles().stream().filter(f -> Files.isRegularFile(f.toPath())).toList(); + } + + private List<File> resolveDaemonLogs(File daemonsLogDir) { + var gradleDaemonFileSet = getFileOperations().fileTree(daemonsLogDir); + gradleDaemonFileSet.include("**/daemon-" + ProcessHandle.current().pid() + "*.log"); + return gradleDaemonFileSet.getFiles().stream().filter(f -> Files.isRegularFile(f.toPath())).toList(); + } + + public abstract static class BuildFinishedFlowAction implements FlowAction<BuildFinishedFlowAction.Parameters> { + interface Parameters extends FlowParameters { + @Input + Property<File> getUploadFile(); + + @Input + Property<File> getProjectDir(); + + @Input + ListProperty<File> getFilteredFiles(); + + @Input + Property<BuildScanExtension> getBuildScan(); + + } + + @Inject + protected abstract FileSystemOperations getFileSystemOperations(); + + @SuppressWarnings("checkstyle:DescendantToken") + @Override + public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNotFoundException { + File uploadFile = parameters.getUploadFile().get(); + if (uploadFile.exists()) { + getFileSystemOperations().delete(spec -> spec.delete(uploadFile)); + } + uploadFile.getParentFile().mkdirs(); + createBuildArchiveTar(parameters.getFilteredFiles().get(), parameters.getProjectDir().get(), uploadFile); + if (uploadFile.exists() && "true".equals(System.getenv("BUILDKITE"))) { + String uploadFilePath = "build/" + uploadFile.getName(); + try { + System.out.println("Uploading buildkite artifact: " + uploadFilePath + "..."); + new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath).start().waitFor(); + + System.out.println("Generating buildscan link for artifact..."); + + Process process = new ProcessBuilder( + "buildkite-agent", + "artifact", + "search", + uploadFilePath, + "--step", + System.getenv("BUILDKITE_JOB_ID"), + "--format", + "%i" + ).start(); + process.waitFor(); + String processOutput; + try { + processOutput = IOUtils.toString(process.getInputStream()); + } catch (IOException e) { + processOutput = ""; + } + String artifactUuid = processOutput.trim(); + + System.out.println("Artifact UUID: " + artifactUuid); + if (artifactUuid.isEmpty() == false) { + String buildkitePipelineSlug = System.getenv("BUILDKITE_PIPELINE_SLUG"); + String targetLink = "https://buildkite.com/organizations/elastic/pipelines/" + + buildkitePipelineSlug + + "/builds/" + + System.getenv("BUILD_NUMBER") + + "/jobs/" + + System.getenv("BUILDKITE_JOB_ID") + + "/artifacts/" + + artifactUuid; + parameters.getBuildScan().get().link("Artifact Upload", targetLink); + } + } catch (Exception e) { + System.out.println("Failed to upload buildkite artifact " + e.getMessage()); + } + } + + } + + private static void createBuildArchiveTar(List<File> files, File projectDir, File uploadFile) { + try ( + OutputStream fOut = Files.newOutputStream(uploadFile.toPath()); + BufferedOutputStream buffOut = new BufferedOutputStream(fOut); + BZip2CompressorOutputStream bzOut = new BZip2CompressorOutputStream(buffOut); + TarArchiveOutputStream tOut = new TarArchiveOutputStream(bzOut) + ) { + Path projectPath =
projectDir.toPath(); + tOut.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU); + tOut.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_STAR); + for (Path path : files.stream().map(File::toPath).toList()) { + if (Files.isRegularFile(path) == false) { + throw new IOException("Only regular files are supported, but got [" + path + "]"); + } + + TarArchiveEntry tarEntry = new TarArchiveEntry(path.toFile(), calculateArchivePath(path, projectPath)); + tarEntry.setSize(Files.size(path)); + tOut.putArchiveEntry(tarEntry); + + // copy file to TarArchiveOutputStream + Files.copy(path, tOut); + tOut.closeArchiveEntry(); + + } + tOut.flush(); + tOut.finish(); + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @NotNull + private static String calculateArchivePath(Path path, Path projectPath) { + return path.startsWith(projectPath) ? projectPath.relativize(path).toString() : path.getFileName().toString(); + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index 7a5bead71fb0e..4f9a7284c83e1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -91,7 +91,6 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, SourceSet sourceSet) { List<String> sourceSetConfigurationNames = List.of( sourceSet.getApiConfigurationName(), sourceSet.getImplementationConfigurationName(), - sourceSet.getImplementationConfigurationName(), sourceSet.getCompileOnlyConfigurationName(), sourceSet.getRuntimeOnlyConfigurationName() ); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java index 867ccb203de0d..15a224b0ff206 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java @@ -9,7 +9,7 @@ import org.gradle.api.DefaultTask; import org.gradle.api.tasks.Input; -import org.gradle.api.tasks.Internal; +import org.gradle.api.tasks.OutputDirectory; import org.gradle.api.tasks.TaskAction; import org.gradle.internal.file.Chmod; @@ -39,7 +39,7 @@ public Chmod getChmod() { throw new UnsupportedOperationException(); } - @Internal + @OutputDirectory public File getDir() { return dir; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java index d249cf756ca8d..6fafe513662c5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java @@ -207,29 +207,12 @@ private static void assertLinesInFile(Path path, List<String> expectedLines) { } } - private static boolean toolExists(Project project) { - if (project.getName().contains("tar")) { - return tarExists(); - } else { - assert project.getName().contains("zip"); - return zipExists(); - } - } - private static void assertNoClassFile(File file) { if (file.getName().endsWith(".class")) { throw new GradleException("Detected class file in distribution ('" +
file.getName() + "')"); } } - private static boolean zipExists() { - return new File("/bin/unzip").exists() || new File("/usr/bin/unzip").exists() || new File("/usr/local/bin/unzip").exists(); - } - - private static boolean tarExists() { - return new File("/bin/tar").exists() || new File("/usr/bin/tar").exists() || new File("/usr/local/bin/tar").exists(); - } - private Object distTaskOutput(TaskProvider buildDistTask) { return new Callable() { @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 2468711561ae4..f727dc165a8a9 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -286,11 +286,12 @@ static void createBuildBwcTask( if (project.getGradle().getStartParameter().isBuildCacheEnabled()) { c.getArgs().add("--build-cache"); } + File rootDir = project.getRootDir(); c.doLast(new Action() { @Override public void execute(Task task) { if (expectedOutputFile.exists() == false) { - Path relativeOutputPath = project.getRootDir().toPath().relativize(expectedOutputFile.toPath()); + Path relativeOutputPath = rootDir.toPath().relativize(expectedOutputFile.toPath()); final String message = "Building %s didn't generate expected artifact [%s]. The working branch may be " + "out-of-date - try merging in the latest upstream changes to the branch."; throw new InvalidUserDataException(message.formatted(bwcVersion.get(), relativeOutputPath)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index 16c7bf6d32862..f92789f701049 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -23,16 +23,17 @@ import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.GradleException; -import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.artifacts.Dependency; +import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.provider.Provider; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.function.Function; -import static org.elasticsearch.gradle.util.GradleUtils.projectDependency; - /** * An internal elasticsearch build plugin that registers additional * distribution resolution strategies to the 'elasticsearch.download-distribution' plugin @@ -64,18 +65,18 @@ public void apply(Project project) { *

 * BWC versions are resolved as project to projects under `:distribution:bwc`.
 */
-    private void registerInternalDistributionResolutions(NamedDomainObjectContainer<DistributionResolution> resolutions) {
-        resolutions.register("localBuild", distributionResolution -> distributionResolution.setResolver((project, distribution) -> {
+    private void registerInternalDistributionResolutions(List<DistributionResolution> resolutions) {
+        resolutions.add(new DistributionResolution("local-build", (project, distribution) -> {
             if (isCurrentVersion(distribution)) {
                 // non-external project, so depend on local build
                 return new ProjectBasedDistributionDependency(
-                    config -> projectDependency(project, distributionProjectPath(distribution), config)
+                    config -> projectDependency(project.getDependencies(), distributionProjectPath(distribution), config)
                 );
             }
             return null;
         }));
 
-        resolutions.register("bwc", distributionResolution -> distributionResolution.setResolver((project, distribution) -> {
+        resolutions.add(new DistributionResolution("bwc", (project, distribution) -> {
             BwcVersions.UnreleasedVersionInfo unreleasedInfo = BuildParams.getBwcVersions()
                 .unreleasedInfo(Version.fromString(distribution.getVersion()));
             if (unreleasedInfo != null) {
@@ -89,7 +90,7 @@ private void registerInternalDistributionResolutions(NamedDomainObjectContainer<
                 }
                 String projectConfig = getProjectConfig(distribution, unreleasedInfo);
                 return new ProjectBasedDistributionDependency(
-                    (config) -> projectDependency(project, unreleasedInfo.gradleProjectPath(), projectConfig)
+                    (config) -> projectDependency(project.getDependencies(), unreleasedInfo.gradleProjectPath(), projectConfig)
                 );
             }
             return null;
@@ -116,6 +117,13 @@ private static String getProjectConfig(ElasticsearchDistribution distribution, B
         }
     }
 
+    private static Dependency projectDependency(DependencyHandler dependencyHandler, String projectPath, String projectConfig) {
+        Map<String, Object> depConfig = new HashMap<>();
+        depConfig.put("path", projectPath);
+        depConfig.put("configuration", projectConfig);
+        return dependencyHandler.project(depConfig);
+    }
+
     private static String distributionProjectPath(ElasticsearchDistribution distribution) {
         String projectPath = ":distribution";
         if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) {
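A minimal sketch of how a plugin could register its own strategy against the new list-based resolution API shown in this hunk. The "example" resolution name, the version check, and the coordinates are invented for illustration; only `DistributionResolution`, its two-argument constructor, and `DistributionDependency.of` come from this change set.

```java
import org.elasticsearch.gradle.DistributionDependency;
import org.elasticsearch.gradle.DistributionResolution;

import java.util.List;

public class ExampleResolutionRegistrar {
    public static void register(List<DistributionResolution> resolutions) {
        // Strategies are consulted in insertion order; the first non-null
        // DistributionDependency wins, with the default download-service
        // notation as the fallback.
        resolutions.add(new DistributionResolution("example", (project, distribution) -> {
            if ("8.12.0".equals(distribution.getVersion())) {
                return DistributionDependency.of("org.example.group:elasticsearch:8.12.0@tar.gz");
            }
            return null; // defer to later strategies
        }));
    }
}
```

Returning null hands resolution to the next strategy in the list, which is what the refactored `resolveDependencyNotation` below relies on.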
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/NoticeTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/NoticeTask.java
index 1e2506908d108..751ac92512dad 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/NoticeTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/NoticeTask.java
@@ -16,11 +16,11 @@
 import org.gradle.api.file.ProjectLayout;
 import org.gradle.api.file.SourceDirectorySet;
 import org.gradle.api.internal.file.FileOperations;
-import org.gradle.api.model.ObjectFactory;
 import org.gradle.api.provider.ListProperty;
 import org.gradle.api.tasks.CacheableTask;
 import org.gradle.api.tasks.InputFile;
 import org.gradle.api.tasks.InputFiles;
+import org.gradle.api.tasks.Internal;
 import org.gradle.api.tasks.Optional;
 import org.gradle.api.tasks.OutputFile;
 import org.gradle.api.tasks.PathSensitive;
@@ -43,7 +43,7 @@
  * A task to create a notice file which includes dependencies' notices.
  */
 @CacheableTask
-public class NoticeTask extends DefaultTask {
+public abstract class NoticeTask extends DefaultTask {
 
     @InputFile
     @PathSensitive(PathSensitivity.RELATIVE)
@@ -57,19 +57,17 @@ public class NoticeTask extends DefaultTask {
     /**
      * Directories to include notices from
      */
-    private final ListProperty<File> licensesDirs;
+    @Internal
+    abstract ListProperty<File> getLicenseDirs();
 
     private final FileOperations fileOperations;
-    private ObjectFactory objectFactory;
 
     @Inject
-    public NoticeTask(BuildLayout buildLayout, ProjectLayout projectLayout, FileOperations fileOperations, ObjectFactory objectFactory) {
-        this.objectFactory = objectFactory;
+    public NoticeTask(BuildLayout buildLayout, ProjectLayout projectLayout, FileOperations fileOperations) {
         this.fileOperations = fileOperations;
         setDescription("Create a notice file from dependencies");
         // Default licenses directory is ${projectDir}/licenses (if it exists)
-        licensesDirs = objectFactory.listProperty(File.class);
-        licensesDirs.add(projectLayout.getProjectDirectory().dir("licenses").getAsFile());
+        getLicenseDirs().add(projectLayout.getProjectDirectory().dir("licenses").getAsFile());
         inputFile = new File(buildLayout.getRootDirectory(), "NOTICE.txt");
         outputFile = projectLayout.getBuildDirectory().dir("notices/" + getName()).get().file("NOTICE.txt").getAsFile();
     }
@@ -78,7 +76,7 @@ public NoticeTask(BuildLayout buildLayout, ProjectLayout projectLayout, FileOper
      * Add notices from the specified directory.
      */
     public void licensesDir(File licensesDir) {
-        licensesDirs.add(licensesDir);
+        getLicenseDirs().add(licensesDir);
     }
 
     public void source(Object source) {
@@ -185,7 +183,7 @@ public FileCollection getNoticeFiles() {
     }
 
     private List<File> existingLicenseDirs() {
-        return licensesDirs.get().stream().filter(d -> d.exists()).collect(Collectors.toList());
+        return getLicenseDirs().get().stream().filter(d -> d.exists()).collect(Collectors.toList());
     }
 
     @InputFiles
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java
index b32c566363e88..cafa02941d77c 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java
@@ -28,22 +28,14 @@ private static ListMultimap<Class<?>, String> createLegacyRestTestBasePluginUsag
         ListMultimap<Class<?>, String> map = ArrayListMultimap.create(1, 200);
         map.put(LegacyRestTestBasePlugin.class, ":docs");
         map.put(LegacyRestTestBasePlugin.class, ":distribution:docker");
-        map.put(LegacyRestTestBasePlugin.class, ":modules:analysis-common");
-        map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-attachment");
-        map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-common");
-        map.put(LegacyRestTestBasePlugin.class, ":modules:ingest-user-agent");
-        map.put(LegacyRestTestBasePlugin.class, ":modules:kibana");
         map.put(LegacyRestTestBasePlugin.class, ":modules:lang-expression");
         map.put(LegacyRestTestBasePlugin.class, ":modules:lang-mustache");
-        map.put(LegacyRestTestBasePlugin.class, ":modules:lang-painless");
         map.put(LegacyRestTestBasePlugin.class, ":modules:mapper-extras");
         map.put(LegacyRestTestBasePlugin.class, ":modules:parent-join");
         map.put(LegacyRestTestBasePlugin.class, ":modules:percolator");
         map.put(LegacyRestTestBasePlugin.class, ":modules:rank-eval");
         map.put(LegacyRestTestBasePlugin.class, ":modules:reindex");
-
map.put(LegacyRestTestBasePlugin.class, ":modules:repository-s3"); map.put(LegacyRestTestBasePlugin.class, ":modules:repository-url"); - map.put(LegacyRestTestBasePlugin.class, ":modules:runtime-fields-common"); map.put(LegacyRestTestBasePlugin.class, ":modules:transport-netty4"); map.put(LegacyRestTestBasePlugin.class, ":plugins:analysis-icu"); map.put(LegacyRestTestBasePlugin.class, ":plugins:analysis-kuromoji"); @@ -74,14 +66,13 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":qa:system-indices"); map.put(LegacyRestTestBasePlugin.class, ":qa:unconfigured-node-name"); map.put(LegacyRestTestBasePlugin.class, ":qa:verify-version-constants"); + map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-apm-integration"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-delayed-aggs"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-die-with-dignity"); - map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-apm-integration"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-error-query"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-latency-simulating-directory"); map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-seek-tracking-directory"); map.put(LegacyRestTestBasePlugin.class, ":test:yaml-rest-runner"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin"); map.put(LegacyRestTestBasePlugin.class, ":distribution:archives:integ-test-zip"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:core"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ent-search"); @@ -92,17 +83,13 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:mapper-version"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:wildcard"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:kerberos-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:mixed-tier-cluster"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:password-protected-keystore"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:reindex-tests-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:repository-old-versions"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade-basic"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:rolling-upgrade-multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:core-with-mapped"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:core-with-search"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:saml-idp-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:security-example-spi-extension"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:security-setup-password-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:smoke-test-plugins"); @@ -115,12 +102,10 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-basic-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-full-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:multi-cluster-search-security:legacy-with-restricted-trust"); - 
map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:runtime-fields:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:jira"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:pagerduty"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:qa:third-party:slack"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:async-search:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:async-search:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:autoscaling:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:downgrade-to-basic-license"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:multi-cluster"); @@ -130,7 +115,6 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ccr:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:deprecation:qa:early-deprecation-rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:deprecation:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:downsample:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:downsample:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:enrich:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:enrich:qa:rest-with-advanced-security"); @@ -139,55 +123,40 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:ccs-rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:correctness"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:mixed-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:multi-cluster-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:fleet:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:graph:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:identity-provider:qa:idp-rest-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:basic-multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:disabled"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:ml-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:multi-cluster-tests-with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:native-multi-node-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ml:qa:single-node-tests"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:repositories-metering-api:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:hdfs"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:minio"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, 
":x-pack:plugin:searchable-snapshots:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:searchable-snapshots:qa:url"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:operator-privileges-tests"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:profile"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:security-disabled"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:smoke-test-all-realms"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:security:qa:tls-basic"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:shutdown:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:shutdown:qa:rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:slm:qa:with-security"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:fs"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:license-enforcing"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-based-recoveries:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-repo-test-kit:qa:hdfs"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-repo-test-kit:qa:minio"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-repo-test-kit:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:snapshot-repo-test-kit:qa:s3"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:multi-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:no-sql"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:security:with-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:jdbc:security:without-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:mixed-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:multi-cluster-with-security"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:multi-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:security:with-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:sql:qa:server:security:without-ssl"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:stack:qa:rest"); @@ -197,8 +166,8 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:transform:qa:single-node-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile:qa:multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:rest"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-monitoring"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-security"); + map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster"); return map; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java index bb0b8dcf04437..d69a355a3595d 
100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
@@ -10,6 +10,7 @@
 
 import de.thetaphi.forbiddenapis.Checker;
 import de.thetaphi.forbiddenapis.Constants;
+import de.thetaphi.forbiddenapis.ForbiddenApiException;
 import de.thetaphi.forbiddenapis.Logger;
 import de.thetaphi.forbiddenapis.ParseException;
 import groovy.lang.Closure;
@@ -43,6 +44,7 @@
 import org.gradle.api.tasks.PathSensitivity;
 import org.gradle.api.tasks.SkipWhenEmpty;
 import org.gradle.api.tasks.TaskAction;
+import org.gradle.api.tasks.VerificationException;
 import org.gradle.api.tasks.VerificationTask;
 import org.gradle.api.tasks.util.PatternFilterable;
 import org.gradle.api.tasks.util.PatternSet;
@@ -469,6 +471,8 @@ public void execute() {
                 }
                 checker.run();
                 writeMarker(getParameters().getSuccessMarker().getAsFile().get());
+            } catch (ForbiddenApiException e) {
+                throw new VerificationException("Forbidden API verification failed", e);
             } catch (Exception e) {
                 throw new RuntimeException(e);
             } finally {
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
index a25ad34a241d4..d2ba86bb99cf2 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
@@ -218,7 +218,8 @@ public void runThirdPartyAudit() throws IOException {
         if (bogousExcludesCount != 0 && bogousExcludesCount == missingClassExcludes.size() + violationsExcludes.size()) {
             logForbiddenAPIsOutput(forbiddenApisOutput);
             throw new IllegalStateException(
-                "All excluded classes seem to have no issues. This is sometimes an indication that the check silently failed"
+                "All excluded classes seem to have no issues. This is sometimes an indication that the check silently failed "
+                    + "or that exclusions are configured unnecessarily"
             );
         }
         assertNoPointlessExclusions("are not missing", missingClassExcludes, missingClasses);
@@ -261,10 +262,6 @@ private void logForbiddenAPIsOutput(String forbiddenApisOutput) {
         getLogger().error("Forbidden APIs output:\n{}==end of forbidden APIs==", forbiddenApisOutput);
     }
 
-    private void throwNotConfiguredCorrectlyException() {
-        throw new IllegalArgumentException("Audit of third party dependencies is not configured correctly");
-    }
-
    /**
     * Ideally we would do unpacking already via artifact transform and keep unpacked jars across builds.
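The `CheckForbiddenApisTask` hunk above is worth restating in isolation: the expected failure mode is wrapped in Gradle's `VerificationException` so the build can distinguish a verification failure from an infrastructure error. A minimal sketch under that assumption, with a hypothetical task and exception type standing in for the forbidden-apis ones:

```java
import org.gradle.api.DefaultTask;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.VerificationException;

public abstract class ExampleCheckTask extends DefaultTask {

    @TaskAction
    public void check() {
        try {
            runChecker(); // stand-in for Checker#run() in the hunk above
        } catch (ExampleCheckFailedException e) {
            // Expected failure mode: surface as a verification failure
            throw new VerificationException("Example verification failed", e);
        } catch (Exception e) {
            // Anything else is an unexpected task error
            throw new RuntimeException(e);
        }
    }

    // Hypothetical checker and exception, invented for this sketch
    private void runChecker() throws ExampleCheckFailedException {}

    public static class ExampleCheckFailedException extends Exception {}
}
```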
* At the moment transform target folder is not configurable and forbidden CLI only takes one common diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index bcbe1740630ce..42d3a770dbbcc 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -101,7 +101,7 @@ public void apply(Project project) { addDistributionSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath); addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePlugin.getSingleFile().toString()); t.exclude("**/PackageUpgradeTests.class"); - }, distribution.getArchiveDependencies(), examplePlugin.getDependencies()); + }, distribution, examplePlugin.getDependencies()); if (distribution.getPlatform() == Platform.WINDOWS) { windowsTestTasks.add(destructiveTask); @@ -235,6 +235,7 @@ private static ElasticsearchDistribution createDistro( d.setBundledJdk(bundledJdk); } d.setVersion(version); + d.setPreferArchive(true); }); // Allow us to gracefully omit building Docker distributions if Docker is not available on the system. diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java new file mode 100644 index 0000000000000..bd9df6d3903ca --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test; + +import org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.TaskProvider; + +import java.util.Map; + +/** + * Extracts historical feature metadata into a machine-readable format for use in backward compatibility testing. 
+ */
+public class HistoricalFeaturesMetadataPlugin implements Plugin<Project> {
+    public static final String HISTORICAL_FEATURES_JSON = "historical-features.json";
+    public static final String FEATURES_METADATA_TYPE = "features-metadata-json";
+    public static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadata";
+
+    @Override
+    public void apply(Project project) {
+        Configuration featureMetadataExtractorConfig = project.getConfigurations().create("featuresMetadataExtractor", c -> {
+            // Don't bother adding this dependency if the project doesn't exist which simplifies testing
+            if (project.findProject(":test:metadata-extractor") != null) {
+                c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":test:metadata-extractor"))));
+            }
+        });
+
+        SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
+        SourceSet mainSourceSet = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME);
+
+        TaskProvider<HistoricalFeaturesMetadataTask> generateTask = project.getTasks()
+            .register("generateHistoricalFeaturesMetadata", HistoricalFeaturesMetadataTask.class, task -> {
+                task.setClasspath(
+                    featureMetadataExtractorConfig.plus(mainSourceSet.getRuntimeClasspath())
+                        .plus(project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME))
+                );
+                task.getOutputFile().convention(project.getLayout().getBuildDirectory().file(HISTORICAL_FEATURES_JSON));
+            });
+
+        Configuration featuresMetadataArtifactConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> {
+            c.setCanBeResolved(false);
+            c.setCanBeConsumed(true);
+            c.attributes(a -> { a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, FEATURES_METADATA_TYPE); });
+        });
+
+        project.getArtifacts().add(featuresMetadataArtifactConfig.getName(), generateTask);
+    }
+}
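The producer side of this plugin boils down to one pattern: expose a generated file through a consumable-only configuration tagged with a custom artifact type, so consumers resolve it by attribute. A hedged sketch of just that core, with the configuration name `exampleMetadata`, the type string `example-json`, and the output path invented for illustration:

```java
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.type.ArtifactTypeDefinition;

public class ExampleMetadataProducerPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        Configuration outgoing = project.getConfigurations().create("exampleMetadata", c -> {
            c.setCanBeResolved(false); // producer side: consumable only
            c.setCanBeConsumed(true);
            c.attributes(a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, "example-json"));
        });
        // Publishing a file (or a task's output, as above) as the configuration's
        // artifact wires up task dependencies automatically on resolution.
        project.getArtifacts().add(outgoing.getName(), project.getLayout().getBuildDirectory().file("example.json"));
    }
}
```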
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java
new file mode 100644
index 0000000000000..0891225d1e1ef
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.test;
+
+import org.elasticsearch.gradle.LoggedExec;
+import org.gradle.api.DefaultTask;
+import org.gradle.api.file.ConfigurableFileCollection;
+import org.gradle.api.file.FileCollection;
+import org.gradle.api.file.RegularFileProperty;
+import org.gradle.api.tasks.CacheableTask;
+import org.gradle.api.tasks.Classpath;
+import org.gradle.api.tasks.OutputFile;
+import org.gradle.api.tasks.TaskAction;
+import org.gradle.process.ExecOperations;
+import org.gradle.workers.WorkAction;
+import org.gradle.workers.WorkParameters;
+import org.gradle.workers.WorkerExecutor;
+
+import javax.inject.Inject;
+
+@CacheableTask
+public abstract class HistoricalFeaturesMetadataTask extends DefaultTask {
+    private FileCollection classpath;
+
+    @OutputFile
+    public abstract RegularFileProperty getOutputFile();
+
+    @Classpath
+    public FileCollection getClasspath() {
+        return classpath;
+    }
+
+    public void setClasspath(FileCollection classpath) {
+        this.classpath = classpath;
+    }
+
+    @Inject
+    public abstract WorkerExecutor getWorkerExecutor();
+
+    @TaskAction
+    public void execute() {
+        getWorkerExecutor().noIsolation().submit(HistoricalFeaturesMetadataWorkAction.class, params -> {
+            params.getClasspath().setFrom(getClasspath());
+            params.getOutputFile().set(getOutputFile());
+        });
+    }
+
+    public interface HistoricalFeaturesWorkParameters extends WorkParameters {
+        ConfigurableFileCollection getClasspath();
+
+        RegularFileProperty getOutputFile();
+    }
+
+    public abstract static class HistoricalFeaturesMetadataWorkAction implements WorkAction<HistoricalFeaturesWorkParameters> {
+        private final ExecOperations execOperations;
+
+        @Inject
+        public HistoricalFeaturesMetadataWorkAction(ExecOperations execOperations) {
+            this.execOperations = execOperations;
+        }
+
+        @Override
+        public void execute() {
+            LoggedExec.javaexec(execOperations, spec -> {
+                spec.getMainClass().set("org.elasticsearch.extractor.features.HistoricalFeaturesMetadataExtractor");
+                spec.classpath(getParameters().getClasspath());
+                spec.args(getParameters().getOutputFile().get().getAsFile().getAbsolutePath());
+            });
+        }
+    }
+}
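For the consumer side of this metadata, a hedged sketch of how a project might resolve the artifacts produced above. The configuration name `exampleMetadataDeps` and the demo task are invented; the attribute value, the resolvable-only setup, and the default dependency on `:server` mirror what `RestTestBasePlugin` does further down in this patch:

```java
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.type.ArtifactTypeDefinition;

import java.util.Map;

public class ExampleMetadataConsumerPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        Configuration incoming = project.getConfigurations().create("exampleMetadataDeps", c -> {
            c.setCanBeConsumed(false); // consumer side: resolvable only
            c.setCanBeResolved(true);
            c.attributes(a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, "features-metadata-json"));
            c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":server"))));
        });
        // Resolution stays lazy; getAsPath() only materializes files when queried
        project.getTasks().register("printFeaturesMetadata", t -> t.doLast(task -> System.out.println(incoming.getAsPath())));
    }
}
```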
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java
index eacc5da6220ab..b29efbfab069f 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java
@@ -22,9 +22,12 @@
 import org.gradle.api.NamedDomainObjectContainer;
 import org.gradle.api.Plugin;
 import org.gradle.api.Project;
+import org.gradle.api.Task;
 import org.gradle.api.plugins.JavaBasePlugin;
 import org.gradle.api.provider.Provider;
 import org.gradle.api.provider.ProviderFactory;
+import org.gradle.api.specs.NotSpec;
+import org.gradle.api.specs.Spec;
 import org.gradle.api.tasks.Sync;
 import org.gradle.api.tasks.bundling.Zip;
 
@@ -47,6 +50,7 @@ public class LegacyRestTestBasePlugin implements Plugin<Project> {
     private static final String TESTS_CLUSTER_REMOTE_ACCESS = "tests.cluster.remote_access";
 
     private ProviderFactory providerFactory;
+    private Project project;
 
     @Inject
     public LegacyRestTestBasePlugin(ProviderFactory providerFactory) {
@@ -55,6 +59,7 @@ public LegacyRestTestBasePlugin(ProviderFactory providerFactory) {
 
     @Override
     public void apply(Project project) {
+        this.project = project;
         Provider<RestrictedBuildApiService> serviceProvider = project.getGradle()
             .getSharedServices()
             .registerIfAbsent("restrictedBuildAPI", RestrictedBuildApiService.class, spec -> {
@@ -118,9 +123,26 @@ public void apply(Project project) {
                     t.getClusters().forEach(c -> c.plugin(bundle));
                 }
             });
+            configureCacheability(t);
         });
     }
 
+    private void configureCacheability(StandaloneRestIntegTestTask testTask) {
+        Spec<Task> taskSpec = task -> testTask.getClusters().stream().anyMatch(ElasticsearchCluster::isShared);
+        testTask.getOutputs()
+            .doNotCacheIf(
+                "Caching disabled for this task since it uses a cluster shared by other tasks",
+                /*
+                 * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster
+                 * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To
+                 * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between
+                 * multiple tasks.
+                 */
+                taskSpec
+            );
+        testTask.getOutputs().upToDateWhen(new NotSpec<>(taskSpec));
+    }
+
     private String systemProperty(String propName) {
         return providerFactory.systemProperty(propName).getOrNull();
     }
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java
index 524f3dfedf95f..833c7ad546a4a 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java
@@ -85,7 +85,7 @@ public void apply(Project project) {
         NamedDomainObjectContainer<ElasticsearchCluster> clusters = (NamedDomainObjectContainer<ElasticsearchCluster>) project
             .getExtensions()
             .getByName(TestClustersPlugin.EXTENSION_NAME);
-        clusters.all(c -> {
+        clusters.configureEach(c -> {
             if (BuildParams.isInFipsJvm()) {
                 c.setting("xpack.security.transport.ssl.key", "test-node.key");
                 c.keystore("xpack.security.transport.ssl.secure_key_passphrase", "test-node-key-password");
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
index c602a50c2adb8..a7e72b55f9117 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin;
 import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin;
 import org.elasticsearch.gradle.internal.info.BuildParams;
+import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin;
 import org.elasticsearch.gradle.plugin.BasePluginBuildPlugin;
 import org.elasticsearch.gradle.plugin.PluginBuildPlugin;
 import org.elasticsearch.gradle.plugin.PluginPropertiesExtension;
@@ -35,9 +36,12 @@
 import org.gradle.api.Task;
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.artifacts.Dependency;
+import org.gradle.api.artifacts.DependencySet;
 import org.gradle.api.artifacts.ProjectDependency;
 import org.gradle.api.artifacts.type.ArtifactTypeDefinition;
 import org.gradle.api.attributes.Attribute;
+import org.gradle.api.file.ConfigurableFileCollection;
+import org.gradle.api.file.FileCollection;
 import org.gradle.api.file.FileTree;
 import org.gradle.api.provider.ProviderFactory;
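Before the rest of the `RestTestBasePlugin` hunk continues below, the `configureCacheability` change above deserves a standalone illustration: the same `Spec` drives both the build-cache opt-out and the up-to-date override, so a task touching shared state always reruns and its result is never cached. A reduced sketch of that pattern, with an invented shared-fixture flag standing in for the shared-cluster check:

```java
import org.gradle.api.Task;
import org.gradle.api.specs.NotSpec;
import org.gradle.api.specs.Spec;

public class CacheabilityExample {
    public static void disableCachingWhenShared(Task task, boolean usesSharedFixture) {
        Spec<Task> sharedSpec = t -> usesSharedFixture;
        // Never store results produced against shared mutable state in the cache
        task.getOutputs().doNotCacheIf("Task uses a fixture shared with other tasks", sharedSpec);
        // And force re-execution: up-to-date only when the fixture is NOT shared
        task.getOutputs().upToDateWhen(new NotSpec<>(sharedSpec));
    }
}
```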
import org.gradle.api.tasks.ClasspathNormalizer; @@ -72,6 +76,9 @@ public class RestTestBasePlugin implements Plugin { private static final String PLUGINS_CONFIGURATION = "clusterPlugins"; private static final String EXTRACTED_PLUGINS_CONFIGURATION = "extractedPlugins"; private static final Attribute CONFIGURATION_ATTRIBUTE = Attribute.of("test-cluster-artifacts", String.class); + private static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadataDeps"; + private static final String DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION = "defaultDistrofeaturesMetadataDeps"; + private static final String TESTS_FEATURES_METADATA_PATH = "tests.features.metadata.path"; private final ProviderFactory providerFactory; @@ -105,6 +112,36 @@ public void apply(Project project) { extractedPluginsConfiguration.extendsFrom(pluginsConfiguration); configureArtifactTransforms(project); + // Create configuration for aggregating historical feature metadata + FileCollection featureMetadataConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> { + c.setCanBeConsumed(false); + c.setCanBeResolved(true); + c.attributes( + a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) + ); + c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":server")))); + c.withDependencies(dependencies -> { + // We can't just use Configuration#extendsFrom() here as we'd inherit the wrong project configuration + copyDependencies(project, dependencies, modulesConfiguration); + copyDependencies(project, dependencies, pluginsConfiguration); + }); + }); + + FileCollection defaultDistroFeatureMetadataConfig = project.getConfigurations() + .create(DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION, c -> { + c.setCanBeConsumed(false); + c.setCanBeResolved(true); + c.attributes( + a -> a.attribute( + ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, + HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE + ) + ); + c.defaultDependencies( + d -> d.add(project.getDependencies().project(Map.of("path", ":distribution", "configuration", "featuresMetadata"))) + ); + }); + // For plugin and module projects, register the current project plugin bundle as a dependency project.getPluginManager().withPlugin("elasticsearch.esplugin", plugin -> { if (GradleUtils.isModuleProject(project.getPath())) { @@ -122,6 +159,10 @@ public void apply(Project project) { task.dependsOn(integTestDistro, modulesConfiguration); registerDistributionInputs(task, integTestDistro); + // Pass feature metadata on to tests + task.getInputs().files(featureMetadataConfig).withPathSensitivity(PathSensitivity.NONE); + nonInputSystemProperties.systemProperty(TESTS_FEATURES_METADATA_PATH, () -> featureMetadataConfig.getAsPath()); + // Enable parallel execution for these tests since each test gets its own cluster task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); nonInputSystemProperties.systemProperty(TESTS_MAX_PARALLEL_FORKS_SYSPROP, () -> String.valueOf(task.getMaxParallelForks())); @@ -134,16 +175,20 @@ public void apply(Project project) { task.systemProperty("tests.system_call_filter", "false"); // Register plugins and modules as task inputs and pass paths as system properties to tests - nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulesConfiguration::getAsPath); - registerConfigurationInputs(task, modulesConfiguration); - 
nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginsConfiguration::getAsPath); - registerConfigurationInputs(task, extractedPluginsConfiguration); + var modulePath = project.getObjects().fileCollection().from(modulesConfiguration); + nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulePath::getAsPath); + registerConfigurationInputs(task, modulesConfiguration.getName(), modulePath); + var pluginPath = project.getObjects().fileCollection().from(pluginsConfiguration); + nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginPath::getAsPath); + registerConfigurationInputs( + task, + extractedPluginsConfiguration.getName(), + project.getObjects().fileCollection().from(extractedPluginsConfiguration) + ); // Wire up integ-test distribution by default for all test tasks - nonInputSystemProperties.systemProperty( - INTEG_TEST_DISTRIBUTION_SYSPROP, - () -> integTestDistro.getExtracted().getSingleFile().getPath() - ); + FileCollection extracted = integTestDistro.getExtracted(); + nonInputSystemProperties.systemProperty(INTEG_TEST_DISTRIBUTION_SYSPROP, () -> extracted.getSingleFile().getPath()); nonInputSystemProperties.systemProperty(TESTS_RUNTIME_JAVA_SYSPROP, BuildParams.getRuntimeJavaHome()); // Add `usesDefaultDistribution()` extension method to test tasks to indicate they require the default distro @@ -157,6 +202,11 @@ public Void call(Object... args) { DEFAULT_DISTRIBUTION_SYSPROP, providerFactory.provider(() -> defaultDistro.getExtracted().getSingleFile().getPath()) ); + + // If we are using the default distribution we need to register all module feature metadata + task.getInputs().files(defaultDistroFeatureMetadataConfig).withPathSensitivity(PathSensitivity.NONE); + nonInputSystemProperties.systemProperty(TESTS_FEATURES_METADATA_PATH, defaultDistroFeatureMetadataConfig::getAsPath); + return null; } }); @@ -192,6 +242,14 @@ public Void call(Object... 
args) { }); } + private void copyDependencies(Project project, DependencySet dependencies, Configuration configuration) { + configuration.getDependencies() + .stream() + .filter(d -> d instanceof ProjectDependency) + .map(d -> project.getDependencies().project(Map.of("path", ((ProjectDependency) d).getDependencyProject().getPath()))) + .forEach(dependencies::add); + } + private ElasticsearchDistribution createDistribution(Project project, String name, String version) { return createDistribution(project, name, version, null); } @@ -216,15 +274,15 @@ private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Ac return distribution.getExtracted().getAsFileTree().matching(patternFilter); } - private void registerConfigurationInputs(Task task, Configuration configuration) { + private void registerConfigurationInputs(Task task, String configurationName, ConfigurableFileCollection configuration) { task.getInputs() .files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false))) - .withPropertyName(configuration.getName() + "-files") + .withPropertyName(configurationName + "-files") .withPathSensitivity(PathSensitivity.RELATIVE); task.getInputs() .files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar")))) - .withPropertyName(configuration.getName() + "-classpath") + .withPropertyName(configurationName + "-classpath") .withNormalizer(ClasspathNormalizer.class); } diff --git a/build-tools-internal/src/main/resources/fips_java.policy b/build-tools-internal/src/main/resources/fips_java.policy index 4ef62e03c2546..bbfc1caf7593a 100644 --- a/build-tools-internal/src/main/resources/fips_java.policy +++ b/build-tools-internal/src/main/resources/fips_java.policy @@ -1,6 +1,10 @@ grant { permission java.security.SecurityPermission "putProviderProperty.BCFIPS"; permission java.security.SecurityPermission "putProviderProperty.BCJSSE"; + permission java.security.SecurityPermission "getProperty.keystore.type.compat"; + permission java.security.SecurityPermission "getProperty.jdk.tls.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.certpath.disabledAlgorithms"; + permission java.security.SecurityPermission "getProperty.jdk.tls.server.defaultDHEParameters"; permission java.lang.RuntimePermission "getProtectionDomain"; permission java.util.PropertyPermission "java.runtime.name", "read"; permission org.bouncycastle.crypto.CryptoServicesPermission "tlsAlgorithmsEnabled"; diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 34f39bbc4ca54..48c888acd35e2 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -158,6 +158,8 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions() @defaultMessage ClusterFeatures#nodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#nodeFeatures() +@defaultMessage ClusterFeatures#allNodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. +org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures() @defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. 
Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature) diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index dc43523b747b3..98d3ad1eff10b 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -20,10 +20,9 @@ google_oauth_client = 1.34.1 antlr4 = 4.11.1 # when updating this version, you need to ensure compatibility with: -# - modules/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli # - x-pack/plugin/security -bouncycastle=1.64 +bouncycastle=1.76 # used by security and idp (need to be in sync due to cross-dependency in testing) opensaml = 4.3.0 @@ -42,6 +41,12 @@ junit5 = 5.7.1 hamcrest = 2.1 mocksocket = 1.2 +# test container dependencies +testcontainer = 1.19.2 +dockerJava = 3.3.4 +ductTape = 1.0.8 +commonsCompress = 1.24.0 + # benchmark dependencies jmh = 1.26 diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy index 6b662b8165034..22efa8d08d3e7 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy @@ -34,7 +34,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.testclusters' } - class SomeClusterAwareTask extends DefaultTask implements TestClustersAware { + abstract class SomeClusterAwareTask extends DefaultTask implements TestClustersAware { private Collection clusters = new HashSet<>(); @@ -166,7 +166,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { then: result.output.contains("Task ':myTask' is not up-to-date because:\n" + - " Input property 'clusters.myCluster\$0.nodes.\$0.$propertyName'") + " Input property 'clusters.myCluster\$0.$propertyName'") result.output.contains("elasticsearch-keystore script executed!") assertEsOutputContains("myCluster", "Starting Elasticsearch process") assertEsOutputContains("myCluster", "Stopping node") diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index d08dc469e5ba5..fb8416b24d052 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.transform.SymbolicLinkPreservingUntarTransform; import org.elasticsearch.gradle.transform.UnzipTransform; +import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -22,7 +23,8 @@ import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; -import java.util.Comparator; +import java.util.ArrayList; +import java.util.List; import javax.inject.Inject; @@ -42,9 +44,10 @@ public class DistributionDownloadPlugin implements Plugin { private static final String DOWNLOAD_REPO_NAME = "elasticsearch-downloads"; private static final String SNAPSHOT_REPO_NAME = "elasticsearch-snapshots"; public static final String 
DISTRO_EXTRACTED_CONFIG_PREFIX = "es_distro_extracted_"; + public static final String DISTRO_CONFIG_PREFIX = "es_distro_file_"; private NamedDomainObjectContainer distributionsContainer; - private NamedDomainObjectContainer distributionsResolutionStrategiesContainer; + private List distributionsResolutionStrategies; private Property dockerAvailability; @@ -76,8 +79,9 @@ public void apply(Project project) { } private void setupDistributionContainer(Project project, Property dockerAvailable) { + distributionsContainer = project.container(ElasticsearchDistribution.class, name -> { - Configuration fileConfiguration = project.getConfigurations().create("es_distro_file_" + name); + Configuration fileConfiguration = project.getConfigurations().create(DISTRO_CONFIG_PREFIX + name); Configuration extractedConfiguration = project.getConfigurations().create(DISTRO_EXTRACTED_CONFIG_PREFIX + name); extractedConfiguration.getAttributes() .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); @@ -85,21 +89,17 @@ private void setupDistributionContainer(Project project, Property docke name, project.getObjects(), dockerAvailability, - fileConfiguration, - extractedConfiguration, - (dist) -> finalizeDistributionDependencies(project, dist) + project.getObjects().fileCollection().from(fileConfiguration), + project.getObjects().fileCollection().from(extractedConfiguration), + new FinalizeDistributionAction(distributionsResolutionStrategies, project) ); }); project.getExtensions().add(CONTAINER_NAME, distributionsContainer); } private void setupResolutionsContainer(Project project) { - distributionsResolutionStrategiesContainer = project.container(DistributionResolution.class); - // We want this ordered in the same resolution strategies are added - distributionsResolutionStrategiesContainer.whenObjectAdded( - resolveDependencyNotation -> resolveDependencyNotation.setPriority(distributionsResolutionStrategiesContainer.size()) - ); - project.getExtensions().add(RESOLUTION_CONTAINER_NAME, distributionsResolutionStrategiesContainer); + distributionsResolutionStrategies = new ArrayList<>(); + project.getExtensions().add(RESOLUTION_CONTAINER_NAME, distributionsResolutionStrategies); } @SuppressWarnings("unchecked") @@ -108,30 +108,8 @@ public static NamedDomainObjectContainer getContainer } @SuppressWarnings("unchecked") - public static NamedDomainObjectContainer getRegistrationsContainer(Project project) { - return (NamedDomainObjectContainer) project.getExtensions().getByName(RESOLUTION_CONTAINER_NAME); - } - - private void finalizeDistributionDependencies(Project project, ElasticsearchDistribution distribution) { - DependencyHandler dependencies = project.getDependencies(); - // for the distribution as a file, just depend on the artifact directly - DistributionDependency distributionDependency = resolveDependencyNotation(project, distribution); - dependencies.add(distribution.configuration.getName(), distributionDependency.getDefaultNotation()); - // no extraction needed for rpm, deb or docker - if (distribution.getType().shouldExtract()) { - // The extracted configuration depends on the artifact directly but has - // an artifact transform registered to resolve it as an unpacked folder. 
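The `setupDistributionContainer` change above stops handing `ElasticsearchDistribution` live `Configuration` objects and instead wraps each in a `ConfigurableFileCollection` via `project.getObjects().fileCollection().from(...)`. A hedged sketch of that wrapping in isolation; the plugin, configuration, and task names are invented:

```java
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.ConfigurableFileCollection;

public class LazyWrapExamplePlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        Configuration distroFiles = project.getConfigurations().create("exampleDistroFiles");
        // Nothing is resolved here: the view defers to the configuration and only
        // materializes files when queried (e.g. getSingleFile() at execution time),
        // while still carrying the configuration's build dependencies.
        ConfigurableFileCollection lazyView = project.getObjects().fileCollection().from(distroFiles);
        project.getTasks().register("printDistro", t -> t.doLast(task -> System.out.println(lazyView.getAsPath())));
    }
}
```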
- dependencies.add(distribution.getExtracted().getName(), distributionDependency.getExtractedNotation()); - } - } - - private DistributionDependency resolveDependencyNotation(Project p, ElasticsearchDistribution distribution) { - return distributionsResolutionStrategiesContainer.stream() - .sorted(Comparator.comparingInt(DistributionResolution::getPriority)) - .map(r -> r.getResolver().resolve(p, distribution)) - .filter(d -> d != null) - .findFirst() - .orElseGet(() -> DistributionDependency.of(dependencyNotation(distribution))); + public static List getRegistrationsContainer(Project project) { + return (List) project.getExtensions().getByName(RESOLUTION_CONTAINER_NAME); } private static void addIvyRepo(Project project, String name, String url, String group) { @@ -155,22 +133,53 @@ private static void setupDownloadServiceRepo(Project project) { addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://snapshots-no-kpi.elastic.co", FAKE_SNAPSHOT_IVY_GROUP); } - /** - * Returns a dependency object representing the given distribution. - *

- * The returned object is suitable to be passed to {@link DependencyHandler}. - * The concrete type of the object will be a set of maven coordinates as a {@link String}. - * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial - * coordinates that resolve to the Elastic download service through an ivy repository. - */ - private String dependencyNotation(ElasticsearchDistribution distribution) { - if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) { - return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip"; + private record FinalizeDistributionAction(List resolutionList, Project project) + implements + Action { + @Override + + public void execute(ElasticsearchDistribution distro) { + finalizeDistributionDependencies(project, distro); + } + + private void finalizeDistributionDependencies(Project project, ElasticsearchDistribution distribution) { + // for the distribution as a file, just depend on the artifact directly + DistributionDependency distributionDependency = resolveDependencyNotation(project, distribution); + project.getDependencies().add(DISTRO_CONFIG_PREFIX + distribution.getName(), distributionDependency.getDefaultNotation()); + // no extraction needed for rpm, deb or docker + if (distribution.getType().shouldExtract()) { + // The extracted configuration depends on the artifact directly but has + // an artifact transform registered to resolve it as an unpacked folder. + project.getDependencies() + .add(DISTRO_EXTRACTED_CONFIG_PREFIX + distribution.getName(), distributionDependency.getExtractedNotation()); + } + } + + private DistributionDependency resolveDependencyNotation(Project project, ElasticsearchDistribution distro) { + return resolutionList.stream() + .map(r -> r.getResolver().resolve(project, distro)) + .filter(d -> d != null) + .findFirst() + .orElseGet(() -> DistributionDependency.of(dependencyNotation(distro))); + } + + /** + * Returns a dependency object representing the given distribution. + *

+ * The returned object is suitable to be passed to {@link DependencyHandler}. + * The concrete type of the object will be a set of maven coordinates as a {@link String}. + * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial + * coordinates that resolve to the Elastic download service through an ivy repository. + */ + private String dependencyNotation(ElasticsearchDistribution distribution) { + if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) { + return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip"; + } + Version distroVersion = Version.fromString(distribution.getVersion()); + String extension = distribution.getType().getExtension(distribution.getPlatform()); + String classifier = distribution.getType().getClassifier(distribution.getPlatform(), distroVersion); + String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP; + return group + ":elasticsearch" + ":" + distribution.getVersion() + classifier + "@" + extension; } - Version distroVersion = Version.fromString(distribution.getVersion()); - String extension = distribution.getType().getExtension(distribution.getPlatform()); - String classifier = distribution.getType().getClassifier(distribution.getPlatform(), distroVersion); - String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP; - return group + ":elasticsearch" + ":" + distribution.getVersion() + classifier + "@" + extension; } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java index 3b82c9f6975a0..0d8177dea5cb6 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java @@ -12,9 +12,14 @@ public class DistributionResolution { private Resolver resolver; - private String name; + private final String name; private int priority; + public DistributionResolution(String name, Resolver resolver) { + this(name); + this.resolver = resolver; + } + public DistributionResolution(String name) { this.name = name; } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java index 5350b6698cb30..fab6926008d6c 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java @@ -11,7 +11,8 @@ import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.gradle.api.Action; import org.gradle.api.Buildable; -import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskDependency; @@ -44,7 +45,7 @@ public String toString() { private final String name; private final Property dockerAvailability; // pkg private so plugin can configure - final Configuration configuration; + final FileCollection configuration; private final Property architecture; private final Property version; @@ -52,16 +53,17 @@ public String toString() { private final Property platform; private final Property 
bundledJdk; private final Property failIfUnavailable; - private final Configuration extracted; - private Action distributionFinalizer; + private final Property preferArchive; + private final ConfigurableFileCollection extracted; + private transient Action distributionFinalizer; private boolean frozen = false; ElasticsearchDistribution( String name, ObjectFactory objectFactory, Property dockerAvailability, - Configuration fileConfiguration, - Configuration extractedConfiguration, + ConfigurableFileCollection fileConfiguration, + ConfigurableFileCollection extractedConfiguration, Action distributionFinalizer ) { this.name = name; @@ -74,6 +76,7 @@ public String toString() { this.platform = objectFactory.property(Platform.class); this.bundledJdk = objectFactory.property(Boolean.class); this.failIfUnavailable = objectFactory.property(Boolean.class).convention(true); + this.preferArchive = objectFactory.property(Boolean.class).convention(false); this.extracted = extractedConfiguration; this.distributionFinalizer = distributionFinalizer; } @@ -140,6 +143,14 @@ public void setFailIfUnavailable(boolean failIfUnavailable) { this.failIfUnavailable.set(failIfUnavailable); } + public boolean getPreferArchive() { + return preferArchive.get(); + } + + public void setPreferArchive(boolean preferArchive) { + this.preferArchive.set(preferArchive); + } + public void setArchitecture(Architecture architecture) { this.architecture.set(architecture); } @@ -172,7 +183,7 @@ public String getFilepath() { return configuration.getSingleFile().toString(); } - public Configuration getExtracted() { + public ConfigurableFileCollection getExtracted() { if (getType().shouldExtract() == false) { throw new UnsupportedOperationException( "distribution type [" + getType().getName() + "] for " + "elasticsearch distribution [" + name + "] cannot be extracted" @@ -187,7 +198,9 @@ public TaskDependency getBuildDependencies() { return task -> Collections.emptySet(); } else { maybeFreeze(); - return getType().shouldExtract() ? extracted.getBuildDependencies() : configuration.getBuildDependencies(); + return getType().shouldExtract() && (preferArchive.get() == false) + ? 
extracted.getBuildDependencies() + : configuration.getBuildDependencies(); } } @@ -252,13 +265,4 @@ void finalizeValues() { type.finalizeValue(); bundledJdk.finalizeValue(); } - - public TaskDependency getArchiveDependencies() { - if (skippingDockerDistributionBuild()) { - return task -> Collections.emptySet(); - } else { - maybeFreeze(); - return configuration.getBuildDependencies(); - } - } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java index 5c98ab3bf4364..e80d2ed64cabd 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java @@ -12,7 +12,7 @@ import java.util.Collection; import java.util.HashSet; -public class DefaultTestClustersTask extends DefaultTask implements TestClustersAware { +public abstract class DefaultTestClustersTask extends DefaultTask implements TestClustersAware { private Collection clusters = new HashSet<>(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index f6705bdb62faa..bf539efaf3c30 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -14,7 +14,11 @@ import org.gradle.api.Named; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.Dependency; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; import org.gradle.api.file.ArchiveOperations; +import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.RegularFile; @@ -22,10 +26,15 @@ import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; import org.gradle.api.provider.Provider; +import org.gradle.api.tasks.Classpath; +import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Nested; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.Sync; import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.gradle.api.tasks.bundling.Zip; import org.gradle.process.ExecOperations; @@ -35,6 +44,8 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.security.GeneralSecurityException; +import java.util.Collection; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -46,6 +57,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.EXPLODED_BUNDLE_CONFIG; +import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.BUNDLE_ATTRIBUTE; + public class ElasticsearchCluster implements TestClusterConfiguration, Named { private static final Logger LOGGER = Logging.getLogger(ElasticsearchNode.class); @@ -59,7 +73,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { private final FileOperations fileOperations; private final 
File workingDirBase;
     private final LinkedHashMap<String, Predicate<TestClusterConfiguration>> waitConditions = new LinkedHashMap<>();
-    private final Project project;
+    private final transient Project project;
     private final Provider<ReaperService> reaper;
     private final FileSystemOperations fileSystemOperations;
     private final ArchiveOperations archiveOperations;
@@ -68,6 +82,10 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named {
     private final Function<Version, Boolean> isReleasedVersion;
     private int nodeIndex = 0;
 
+    private final ConfigurableFileCollection pluginAndModuleConfiguration;
+
+    private boolean shared = false;
+
     public ElasticsearchCluster(
         String path,
         String clusterName,
@@ -93,6 +111,7 @@ public ElasticsearchCluster(
         this.runtimeJava = runtimeJava;
         this.isReleasedVersion = isReleasedVersion;
         this.nodes = project.container(ElasticsearchNode.class);
+        this.pluginAndModuleConfiguration = project.getObjects().fileCollection();
         this.nodes.add(
             new ElasticsearchNode(
                 safeName(clusterName),
@@ -113,6 +132,29 @@ public ElasticsearchCluster(
         addWaitForClusterHealth();
     }
 
+    /**
+     * This cluster is marked as shared across TestClustersAware tasks.
+     */
+    @Internal
+    public boolean isShared() {
+        return shared;
+    }
+
+    protected void setShared(boolean shared) {
+        this.shared = shared;
+    }
+
+    @Classpath
+    public FileCollection getInstalledClasspath() {
+        return pluginAndModuleConfiguration.getAsFileTree().filter(f -> f.getName().endsWith(".jar"));
+    }
+
+    @InputFiles
+    @PathSensitive(PathSensitivity.RELATIVE)
+    public FileCollection getInstalledFiles() {
+        return pluginAndModuleConfiguration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false);
+    }
+
     public void setNumberOfNodes(int numberOfNodes) {
         checkFrozen();
 
@@ -195,34 +237,70 @@ public void setTestDistribution(TestDistribution distribution) {
         nodes.all(each -> each.setTestDistribution(distribution));
     }
 
-    @Override
-    public void plugin(Provider<RegularFile> plugin) {
-        nodes.all(each -> each.plugin(plugin));
+    private void registerExtractedConfig(Provider<RegularFile> pluginProvider) {
+        Dependency pluginDependency = this.project.getDependencies().create(project.files(pluginProvider));
+        Configuration extractedConfig = project.getConfigurations().detachedConfiguration(pluginDependency);
+        extractedConfig.getAttributes().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE);
+        extractedConfig.getAttributes().attribute(BUNDLE_ATTRIBUTE, true);
+        pluginAndModuleConfiguration.from(extractedConfig);
     }
 
     @Override
+    public void plugin(String pluginProjectPath) {
+        plugin(maybeCreatePluginOrModuleDependency(pluginProjectPath, "zip"));
+    }
+
     public void plugin(TaskProvider<Zip> plugin) {
-        nodes.all(each -> each.plugin(plugin));
+        plugin(plugin.flatMap(AbstractArchiveTask::getArchiveFile));
     }
 
     @Override
-    public void plugin(String pluginProjectPath) {
-        nodes.all(each -> each.plugin(pluginProjectPath));
+    public void plugin(Provider<RegularFile> plugin) {
+        registerExtractedConfig(plugin);
+        nodes.all(each -> each.plugin(plugin));
     }
 
     @Override
     public void module(Provider<RegularFile> module) {
+        registerExtractedConfig(module);
         nodes.all(each -> each.module(module));
     }
 
-    @Override
     public void module(TaskProvider<Sync> module) {
-        nodes.all(each -> each.module(module));
+        module(project.getLayout().file(module.map(Sync::getDestinationDir)));
     }
 
     @Override
     public void module(String moduleProjectPath) {
-        nodes.all(each -> each.module(moduleProjectPath));
+        module(maybeCreatePluginOrModuleDependency(moduleProjectPath, EXPLODED_BUNDLE_CONFIG));
+    }
+
+    private final Map<String, Configuration> pluginAndModuleConfigurations =
new HashMap<>(); + + // package protected so only TestClustersAware can access + @Internal + Collection getPluginAndModuleConfigurations() { + return pluginAndModuleConfigurations.values(); + } + + // creates a configuration to depend on the given plugin project, then wraps that configuration + // to grab the zip as a file provider + private Provider maybeCreatePluginOrModuleDependency(String path, String consumingConfiguration) { + var configuration = pluginAndModuleConfigurations.computeIfAbsent(path, key -> { + var bundleDependency = this.project.getDependencies().project(Map.of("path", path, "configuration", consumingConfiguration)); + return project.getConfigurations().detachedConfiguration(bundleDependency); + }); + + Provider fileProvider = configuration.getElements() + .map( + s -> s.stream() + .findFirst() + .orElseThrow( + () -> new IllegalStateException(consumingConfiguration + " configuration of project " + path + " had no files") + ) + .getAsFile() + ); + return project.getLayout().file(fileProvider); } @Override @@ -579,4 +657,5 @@ public int hashCode() { public String toString() { return "cluster{" + path + ":" + clusterName + "}"; } + } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index f0ab67fe51a34..ce4fd7502f417 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -21,18 +21,12 @@ import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; -import org.elasticsearch.gradle.transform.UnzipTransform; import org.elasticsearch.gradle.util.Pair; import org.gradle.api.Action; import org.gradle.api.Named; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; -import org.gradle.api.artifacts.Configuration; -import org.gradle.api.artifacts.Dependency; -import org.gradle.api.artifacts.type.ArtifactTypeDefinition; -import org.gradle.api.attributes.Attribute; import org.gradle.api.file.ArchiveOperations; -import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.FileTree; @@ -52,7 +46,6 @@ import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.Sync; import org.gradle.api.tasks.TaskProvider; -import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.gradle.api.tasks.bundling.Zip; import org.gradle.api.tasks.util.PatternFilterable; import org.gradle.process.ExecOperations; @@ -75,7 +68,6 @@ import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -98,7 +90,6 @@ import static java.util.Objects.requireNonNull; import static java.util.Optional.ofNullable; -import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.EXPLODED_BUNDLE_CONFIG; public class ElasticsearchNode implements TestClusterConfiguration { @@ -130,7 +121,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final String path; private final String name; - private final Project project; + transient private final Project project; private final Provider reaperServiceProvider; private final 
FileSystemOperations fileSystemOperations; private final ArchiveOperations archiveOperations; @@ -140,8 +131,6 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Path workingDir; private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); - private final Map pluginAndModuleConfigurations = new HashMap<>(); - private final ConfigurableFileCollection pluginAndModuleConfiguration; private final List> plugins = new ArrayList<>(); private final List> modules = new ArrayList<>(); private final LazyPropertyMap settings = new LazyPropertyMap<>("Settings", this); @@ -151,6 +140,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final LazyPropertyMap systemProperties = new LazyPropertyMap<>("System properties", this); private final LazyPropertyMap environment = new LazyPropertyMap<>("Environment", this); private final LazyPropertyList jvmArgs = new LazyPropertyList<>("JVM arguments", this); + private final LazyPropertyList cliJvmArgs = new LazyPropertyList<>("CLI JVM arguments", this); private final LazyPropertyMap extraConfigFiles = new LazyPropertyMap<>("Extra config files", this, FileEntry::new); private final LazyPropertyList extraJarConfigurations = new LazyPropertyList<>("Extra jar files", this); private final List> credentials = new ArrayList<>(); @@ -171,12 +161,10 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Provider runtimeJava; private final Function isReleasedVersion; private final List distributions = new ArrayList<>(); - private final Attribute bundleAttribute = Attribute.of("bundle", Boolean.class); - private int currentDistro = 0; private TestDistribution testDistribution; private volatile Process esProcess; - private Function nameCustomization = Function.identity(); + private Function nameCustomization = s -> s; private boolean isWorkingDirConfigured = false; private String httpPort = "0"; private String transportPort = "0"; @@ -223,10 +211,8 @@ public class ElasticsearchNode implements TestClusterConfiguration { waitConditions.put("ports files", this::checkPortsFilesExistWithDelay); defaultConfig.put("cluster.name", clusterName); - pluginAndModuleConfiguration = project.getObjects().fileCollection(); setTestDistribution(TestDistribution.INTEG_TEST); setVersion(VersionProperties.getElasticsearch()); - configureArtifactTransforms(); } @Input @@ -302,84 +288,34 @@ private void setDistributionType(ElasticsearchDistribution distribution, TestDis } } - // package protected so only TestClustersAware can access - @Internal - Collection getPluginAndModuleConfigurations() { - return pluginAndModuleConfigurations.values(); - } - - // creates a configuration to depend on the given plugin project, then wraps that configuration - // to grab the zip as a file provider - private Provider maybeCreatePluginOrModuleDependency(String path, String consumingConfiguration) { - var configuration = pluginAndModuleConfigurations.computeIfAbsent(path, key -> { - var bundleDependency = this.project.getDependencies().project(Map.of("path", path, "configuration", consumingConfiguration)); - return project.getConfigurations().detachedConfiguration(bundleDependency); - }); - - Provider fileProvider = configuration.getElements() - .map( - s -> s.stream() - .findFirst() - .orElseThrow( - () -> new IllegalStateException(consumingConfiguration + " configuration of project " + path + " had no files") - ) - .getAsFile() - ); - return project.getLayout().file(fileProvider); - } - @Override public void 
plugin(Provider plugin) { checkFrozen(); - registerExtractedConfig(plugin); this.plugins.add(plugin.map(RegularFile::getAsFile)); } @Override public void plugin(String pluginProjectPath) { - plugin(maybeCreatePluginOrModuleDependency(pluginProjectPath, "zip")); + throw new UnsupportedOperationException("Not Supported API"); } public void plugin(TaskProvider plugin) { - plugin(plugin.flatMap(AbstractArchiveTask::getArchiveFile)); + throw new UnsupportedOperationException("Not Supported API"); } @Override public void module(Provider module) { checkFrozen(); - registerExtractedConfig(module); this.modules.add(module.map(RegularFile::getAsFile)); } public void module(TaskProvider module) { - module(project.getLayout().file(module.map(Sync::getDestinationDir))); - } - - private void registerExtractedConfig(Provider pluginProvider) { - Dependency pluginDependency = this.project.getDependencies().create(project.files(pluginProvider)); - Configuration extractedConfig = project.getConfigurations().detachedConfiguration(pluginDependency); - extractedConfig.getAttributes().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); - extractedConfig.getAttributes().attribute(bundleAttribute, true); - pluginAndModuleConfiguration.from(extractedConfig); - } - - private void configureArtifactTransforms() { - project.getDependencies().getAttributesSchema().attribute(bundleAttribute); - project.getDependencies().getArtifactTypes().maybeCreate(ArtifactTypeDefinition.ZIP_TYPE); - project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> { - transformSpec.getFrom() - .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.ZIP_TYPE) - .attribute(bundleAttribute, true); - transformSpec.getTo() - .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE) - .attribute(bundleAttribute, true); - transformSpec.getParameters().setAsFiletreeOutput(true); - }); + throw new IllegalStateException("Not Supported API"); } @Override public void module(String moduleProjectPath) { - module(maybeCreatePluginOrModuleDependency(moduleProjectPath, EXPLODED_BUNDLE_CONFIG)); + throw new IllegalStateException("Not Supported API"); } @Override @@ -471,6 +407,10 @@ public void jvmArgs(String... values) { jvmArgs.addAll(Arrays.asList(values)); } + public void cliJvmArgs(String... values) { + cliJvmArgs.addAll(Arrays.asList(values)); + } + @Internal public Path getConfigDir() { return configFile.getParent(); @@ -932,6 +872,10 @@ private void startElasticsearchProcess() { // Don't inherit anything from the environment for as that would lack reproducibility environment.clear(); environment.putAll(getESEnvironment()); + if (cliJvmArgs.isEmpty() == false) { + String cliJvmArgsString = String.join(" ", cliJvmArgs); + environment.put("CLI_JAVA_OPTS", cliJvmArgsString); + } // Direct the stderr to the ES log file. This should capture any jvm problems to start. // Stdout is discarded because ES duplicates the log file to stdout when run in the foreground. 
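Worth spelling out from the two `cliJvmArgs` hunks above: arguments registered through the new `cliJvmArgs(String...)` method are never appended to the Elasticsearch command line; `startElasticsearchProcess()` folds them into a single `CLI_JAVA_OPTS` environment variable. A minimal runnable sketch of just that wiring, with the surrounding ElasticsearchNode class omitted and the debug agent string from elsewhere in this patch used as the sample value:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Sketch of the CLI_JAVA_OPTS wiring shown in the diff above; not the real class.
    class CliJvmArgsSketch {
        public static void main(String[] args) {
            // Arguments collected via the new cliJvmArgs(String... values) method.
            List<String> cliJvmArgs = List.of("-agentlib:jdwp=transport=dt_socket,server=n,suspend=y,address=5107");
            Map<String, String> environment = new HashMap<>();
            if (cliJvmArgs.isEmpty() == false) {
                // Space-joined, exactly as startElasticsearchProcess() does before spawning the process.
                environment.put("CLI_JAVA_OPTS", String.join(" ", cliJvmArgs));
            }
            System.out.println(environment.get("CLI_JAVA_OPTS"));
        }
    }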
@@ -1551,17 +1495,6 @@ private Path getExtractedDistributionDir() { return distributions.get(currentDistro).getExtracted().getSingleFile().toPath(); } - @Classpath - public FileCollection getInstalledClasspath() { - return pluginAndModuleConfiguration.getAsFileTree().filter(f -> f.getName().endsWith(".jar")); - } - - @InputFiles - @PathSensitive(PathSensitivity.RELATIVE) - public FileCollection getInstalledFiles() { - return pluginAndModuleConfiguration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false); - } - @Classpath public List getDistributionClasspath() { return getDistributionFiles(filter -> filter.include("**/*.jar")); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java index 7ec74ee19d1bb..7c1d4b6015d2e 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java @@ -25,10 +25,10 @@ * This is a server which just accepts lines of JSON code and if the JSON * is valid and the root node is "transaction", then adds that JSON object * to a transaction list which is accessible externally to the class. - * + *
 * The Elastic agent sends lines of JSON code, and so this mock server
 * can be used as a basic APM server for testing.
- *
+ * <p>
* The HTTP server used is the JDK embedded com.sun.net.httpserver */ public class MockApmServer { @@ -54,12 +54,16 @@ public static void main(String[] args) throws IOException, InterruptedException /** * Start the Mock APM server. Just returns empty JSON structures for every incoming message + * * @return - the port the Mock APM server started on * @throws IOException */ public synchronized int start() throws IOException { if (instance != null) { - throw new IOException("MockApmServer: Ooops, you can't start this instance more than once"); + String hostname = instance.getAddress().getHostName(); + int port = instance.getAddress().getPort(); + logger.lifecycle("MockApmServer is already running. Reusing on address:port " + hostname + ":" + port); + return port; } InetSocketAddress addr = new InetSocketAddress("0.0.0.0", port); HttpServer server = HttpServer.create(addr, 10); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java index 953c0447ec71b..477842a201bb9 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -40,6 +40,7 @@ public abstract class RunTask extends DefaultTestClustersTask { private static final String transportCertificate = "private-cert2.p12"; private Boolean debug = false; + private Boolean cliDebug = false; private Boolean apmServerEnabled = false; private Boolean preserveData = false; @@ -62,11 +63,21 @@ public void setDebug(boolean enabled) { this.debug = enabled; } + @Option(option = "debug-cli-jvm", description = "Enable debugging configuration, to allow attaching a debugger to the cli launcher.") + public void setCliDebug(boolean enabled) { + this.cliDebug = enabled; + } + @Input public Boolean getDebug() { return debug; } + @Input + public Boolean getCliDebug() { + return cliDebug; + } + @Input public Boolean getApmServerEnabled() { return apmServerEnabled; @@ -194,7 +205,9 @@ public void beforeStart() { } catch (IOException e) { logger.warn("Unable to start APM server", e); } - + } else { + // metrics are enabled by default, if the --with-apm-server was not used we should disable it + node.setting("telemetry.metrics.enabled", "false"); } } @@ -202,6 +215,9 @@ public void beforeStart() { if (debug) { enableDebug(); } + if (cliDebug) { + enableCliDebug(); + } } @TaskAction diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index 2bd8219dc48e5..5e6b33aa980f0 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -8,11 +8,9 @@ package org.elasticsearch.gradle.testclusters; import org.elasticsearch.gradle.FileSystemOperationsAware; -import org.gradle.api.Task; +import org.gradle.api.provider.ProviderFactory; import org.gradle.api.services.internal.BuildServiceProvider; import org.gradle.api.services.internal.BuildServiceRegistryInternal; -import org.gradle.api.specs.NotSpec; -import org.gradle.api.specs.Spec; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Nested; @@ -28,6 +26,8 @@ import java.util.HashSet; import java.util.List; +import javax.inject.Inject; + 
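A usage note on the RunTask hunks above: the new flag targets the CLI launcher JVM rather than the server JVM, so the expected entry point is an invocation along the lines of `./gradlew run --debug-cli-jvm` (the exact task path depends on the project). Because the jdwp agent configured by `enableCliDebug()` (added to TestClustersAware further down) uses `server=n,suspend=y`, the launcher dials out to an already-listening debugger: the IDE debug listener on port 5107, incremented once per node, has to be started before the task runs. This mirrors the existing `enableDebug()` behaviour, which starts from port 5007.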
import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.THROTTLE_SERVICE_NAME; /** @@ -42,23 +42,6 @@ public abstract class StandaloneRestIntegTestTask extends Test implements TestCl private boolean debugServer = false; public StandaloneRestIntegTestTask() { - Spec taskSpec = t -> getProject().getTasks() - .withType(StandaloneRestIntegTestTask.class) - .stream() - .filter(task -> task != this) - .anyMatch(task -> Collections.disjoint(task.getClusters(), getClusters()) == false); - this.getOutputs() - .doNotCacheIf( - "Caching disabled for this task since it uses a cluster shared by other tasks", - /* - * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster - * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To - * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between - * multiple tasks. - */ - taskSpec - ); - this.getOutputs().upToDateWhen(new NotSpec(taskSpec)); this.getOutputs() .doNotCacheIf( "Caching disabled for this task since it is configured to preserve data directory", @@ -79,16 +62,22 @@ public Collection getClusters() { return clusters; } + @Override + @Inject + public abstract ProviderFactory getProviderFactory(); + @Override @Internal public List getSharedResources() { + // Since we need to have the buildservice registered for configuration cache compatibility, + // we already get one lock for throttle service List locks = new ArrayList<>(super.getSharedResources()); BuildServiceRegistryInternal serviceRegistry = getServices().get(BuildServiceRegistryInternal.class); BuildServiceProvider serviceProvider = serviceRegistry.consume(THROTTLE_SERVICE_NAME, TestClustersThrottle.class); SharedResource resource = serviceRegistry.forService(serviceProvider); int nodeCount = clusters.stream().mapToInt(cluster -> cluster.getNodes().size()).sum(); if (nodeCount > 0) { - for (int i = 0; i < Math.min(nodeCount, resource.getMaxUsages()); i++) { + for (int i = 0; i < Math.min(nodeCount, resource.getMaxUsages() - 1); i++) { locks.add(resource.getResourceLock()); } } @@ -101,6 +90,7 @@ public WorkResult delete(Object... 
objects) { @Override public void beforeStart() { + TestClustersAware.super.beforeStart(); if (debugServer) { enableDebug(); } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 9537162b5d109..3fef77688c48d 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -8,18 +8,27 @@ package org.elasticsearch.gradle.testclusters; import org.gradle.api.Task; -import org.gradle.api.artifacts.Configuration; +import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; +import org.gradle.api.services.ServiceReference; import org.gradle.api.tasks.Nested; import java.util.Collection; -import java.util.concurrent.Callable; + +import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.REGISTRY_SERVICE_NAME; +import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.TEST_CLUSTER_TASKS_SERVICE; public interface TestClustersAware extends Task { @Nested Collection getClusters(); + @ServiceReference(REGISTRY_SERVICE_NAME) + Property getRegistery(); + + @ServiceReference(TEST_CLUSTER_TASKS_SERVICE) + Property getTasksService(); + default void useCluster(ElasticsearchCluster cluster) { if (cluster.getPath().equals(getProject().getPath()) == false) { throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); @@ -27,7 +36,7 @@ default void useCluster(ElasticsearchCluster cluster) { cluster.getNodes() .all(node -> node.getDistributions().forEach(distro -> dependsOn(getProject().provider(() -> distro.maybeFreeze())))); - cluster.getNodes().all(node -> dependsOn((Callable>) node::getPluginAndModuleConfigurations)); + dependsOn(cluster.getPluginAndModuleConfigurations()); getClusters().add(cluster); } @@ -35,7 +44,9 @@ default void useCluster(Provider cluster) { useCluster(cluster.get()); } - default void beforeStart() {} + default void beforeStart() { + getTasksService().get().register(this); + } default void enableDebug() { int debugPort = 5007; @@ -47,4 +58,19 @@ default void enableDebug() { } } } + + default void enableCliDebug() { + int cliDebugPort = 5107; + for (ElasticsearchCluster cluster : getClusters()) { + for (ElasticsearchNode node : cluster.getNodes()) { + getLogger().lifecycle( + "Running cli launcher in debug mode, {} expecting running debug server on port {}", + node, + cliDebugPort + ); + node.cliJvmArgs("-agentlib:jdwp=transport=dt_socket,server=n,suspend=y,address=" + cliDebugPort); + cliDebugPort += 1; + } + } + } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 72a462c3cd8c9..d4ae65d43893a 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -11,11 +11,14 @@ import org.elasticsearch.gradle.ReaperPlugin; import org.elasticsearch.gradle.ReaperService; import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.transform.UnzipTransform; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; import 
org.gradle.api.Task; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.attributes.Attribute; import org.gradle.api.file.ArchiveOperations; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.internal.file.FileOperations; @@ -45,12 +48,15 @@ public class TestClustersPlugin implements Plugin { + public static final Attribute BUNDLE_ATTRIBUTE = Attribute.of("bundle", Boolean.class); + public static final String EXTENSION_NAME = "testClusters"; public static final String THROTTLE_SERVICE_NAME = "testClustersThrottle"; private static final String LIST_TASK_NAME = "listTestClusters"; - private static final String REGISTRY_SERVICE_NAME = "testClustersRegistry"; + public static final String REGISTRY_SERVICE_NAME = "testClustersRegistry"; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); + public static final String TEST_CLUSTER_TASKS_SERVICE = "testClusterTasksService"; private final ProviderFactory providerFactory; private Provider runtimeJavaProvider; private Function isReleasedVersion = v -> true; @@ -109,7 +115,7 @@ public void apply(Project project) { project.getGradle().getSharedServices().registerIfAbsent(REGISTRY_SERVICE_NAME, TestClustersRegistry.class, noop()); // register throttle so we only run at most max-workers/2 nodes concurrently - project.getGradle() + Provider testClustersThrottleProvider = project.getGradle() .getSharedServices() .registerIfAbsent( THROTTLE_SERVICE_NAME, @@ -117,8 +123,23 @@ public void apply(Project project) { spec -> spec.getMaxParallelUsages().set(Math.max(1, project.getGradle().getStartParameter().getMaxWorkerCount() / 2)) ); - // register cluster hooks + project.getTasks().withType(TestClustersAware.class).configureEach(task -> { task.usesService(testClustersThrottleProvider); }); project.getRootProject().getPluginManager().apply(TestClustersHookPlugin.class); + configureArtifactTransforms(project); + } + + private void configureArtifactTransforms(Project project) { + project.getDependencies().getAttributesSchema().attribute(BUNDLE_ATTRIBUTE); + project.getDependencies().getArtifactTypes().maybeCreate(ArtifactTypeDefinition.ZIP_TYPE); + project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> { + transformSpec.getFrom() + .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.ZIP_TYPE) + .attribute(BUNDLE_ATTRIBUTE, true); + transformSpec.getTo() + .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE) + .attribute(BUNDLE_ATTRIBUTE, true); + transformSpec.getParameters().setAsFiletreeOutput(true); + }); } private NamedDomainObjectContainer createTestClustersContainerExtension( @@ -156,13 +177,13 @@ private void createListClustersTask(Project project, NamedDomainObjectContainer< (Task t) -> container.forEach(cluster -> logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getNumberOfNodes())) ); }); - } static abstract class TestClustersHookPlugin implements Plugin { @Inject public abstract BuildEventsListenerRegistry getEventsListenerRegistry(); + @SuppressWarnings("checkstyle:RedundantModifier") @Inject public TestClustersHookPlugin() {} @@ -177,10 +198,9 @@ public void apply(Project project) { Provider testClusterTasksService = project.getGradle() .getSharedServices() - .registerIfAbsent("testClusterTasksService", TaskEventsService.class, spec -> {}); + .registerIfAbsent(TEST_CLUSTER_TASKS_SERVICE, TaskEventsService.class, spec -> {}); TestClustersRegistry registry = 
registryProvider.get(); - // When we know what tasks will run, we claim the clusters of those task to differentiate between clusters // that are defined in the build script and the ones that will actually be used in this invocation of gradle // we use this information to determine when the last task that required the cluster executed so that we can @@ -219,10 +239,9 @@ private void configureStartClustersHook( .filter(task -> task instanceof TestClustersAware) .map(task -> (TestClustersAware) task) .forEach(awareTask -> { - testClusterTasksService.get().register(awareTask.getPath(), awareTask); awareTask.doFirst(task -> { awareTask.beforeStart(); - awareTask.getClusters().forEach(registry::maybeStartCluster); + awareTask.getClusters().forEach(awareTask.getRegistery().get()::maybeStartCluster); }); }); }); @@ -234,12 +253,12 @@ static public abstract class TaskEventsService implements BuildService tasksMap = new HashMap<>(); private TestClustersRegistry registryProvider; - public void register(String path, TestClustersAware task) { - tasksMap.put(path, task); + public void register(TestClustersAware task) { + tasksMap.put(task.getPath(), task); } public void registry(TestClustersRegistry registry) { - registryProvider = registry; + this.registryProvider = registry; } @Override diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java index b46e86ca84bdd..1d6efdabcd59f 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java @@ -22,11 +22,16 @@ public abstract class TestClustersRegistry implements BuildService claimsInventory = new HashMap<>(); + private final Set runningClusters = new HashSet<>(); public void claimCluster(ElasticsearchCluster cluster) { cluster.freeze(); - claimsInventory.put(cluster, claimsInventory.getOrDefault(cluster, 0) + 1); + int claim = claimsInventory.getOrDefault(cluster, 0) + 1; + claimsInventory.put(cluster, claim); + if (claim > 1) { + cluster.setShared(true); + } } public void maybeStartCluster(ElasticsearchCluster cluster) { @@ -63,7 +68,6 @@ public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { } else { int currentClaims = claimsInventory.getOrDefault(cluster, 0) - 1; claimsInventory.put(cluster, currentClaims); - if (currentClaims <= 0 && runningClusters.contains(cluster)) { cluster.stop(false); runningClusters.remove(cluster); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersThrottle.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersThrottle.java index 6ba3261691cb9..4dfd950e0aaf1 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersThrottle.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersThrottle.java @@ -10,4 +10,6 @@ import org.gradle.api.services.BuildService; import org.gradle.api.services.BuildServiceParameters; -public abstract class TestClustersThrottle implements BuildService {} +public abstract class TestClustersThrottle implements BuildService { + +} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java index ce69c4ec476f9..00e5834b0f826 100644 --- 
a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java @@ -13,7 +13,6 @@ import org.gradle.api.Task; import org.gradle.api.UnknownTaskException; import org.gradle.api.artifacts.Configuration; -import org.gradle.api.artifacts.Dependency; import org.gradle.api.artifacts.ModuleDependency; import org.gradle.api.artifacts.ProjectDependency; import org.gradle.api.plugins.JavaBasePlugin; @@ -34,7 +33,6 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -183,16 +181,6 @@ public static void extendSourceSet(Project project, String parentSourceSetName, } } - public static Dependency projectDependency(Project project, String projectPath, String projectConfig) { - if (project.findProject(projectPath) == null) { - throw new GradleException("no project [" + projectPath + "], project names: " + project.getRootProject().getAllprojects()); - } - Map depConfig = new HashMap<>(); - depConfig.put("path", projectPath); - depConfig.put("configuration", projectConfig); - return project.getDependencies().project(depConfig); - } - /** * To calculate the project path from a task path without relying on Task#getProject() which is discouraged during * task execution time. diff --git a/build.gradle b/build.gradle index d05c2bf53f660..c0b613beefea4 100644 --- a/build.gradle +++ b/build.gradle @@ -29,8 +29,8 @@ plugins { id 'lifecycle-base' id 'elasticsearch.docker-support' id 'elasticsearch.global-build-info' - id 'elasticsearch.build-scan' id 'elasticsearch.build-complete' + id 'elasticsearch.build-scan' id 'elasticsearch.jdk-download' id 'elasticsearch.internal-distribution-download' id 'elasticsearch.runtime-jdk-provision' @@ -161,8 +161,10 @@ tasks.register("verifyVersions") { String versionMapping = backportConfig.get("branchLabelMapping").fields().find { it.value.textValue() == 'main' }.key String expectedMapping = "^v${versions.elasticsearch.replaceAll('-SNAPSHOT', '')}\$" if (versionMapping != expectedMapping) { - throw new GradleException("Backport label mapping for branch 'main' is '${versionMapping}' but should be " + - "'${expectedMapping}'. Update .backportrc.json.") + throw new GradleException( + "Backport label mapping for branch 'main' is '${versionMapping}' but should be " + + "'${expectedMapping}'. Update .backportrc.json." + ) } } } @@ -211,9 +213,9 @@ allprojects { project.ext { // for ide hacks... 
isEclipse = providers.systemProperty("eclipse.launcher").isPresent() || // Detects gradle launched from Eclipse's IDE - providers.systemProperty("eclipse.application").isPresent() || // Detects gradle launched from the Eclipse compiler server - gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff - gradle.startParameter.taskNames.contains('cleanEclipse') + providers.systemProperty("eclipse.application").isPresent() || // Detects gradle launched from the Eclipse compiler server + gradle.startParameter.taskNames.contains('eclipse') || // Detects gradle launched from the command line to do eclipse stuff + gradle.startParameter.taskNames.contains('cleanEclipse') } ext.bwc_tests_enabled = bwc_tests_enabled @@ -229,10 +231,10 @@ allprojects { eclipse.classpath.file.whenMerged { classpath -> if (false == forbiddenApisTest.bundledSignatures.contains('jdk-non-portable')) { classpath.entries - .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") } - .each { - it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*")) - } + .findAll { it.kind == "con" && it.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") } + .each { + it.accessRules.add(new AccessRule("accessible", "com/sun/net/httpserver/*")) + } } } } @@ -248,6 +250,8 @@ allprojects { plugins.withId('lifecycle-base') { if (project.path.startsWith(":x-pack:")) { if (project.path.contains("security") || project.path.contains(":ml")) { + tasks.register('checkPart4') { dependsOn 'check' } + } else if (project.path == ":x-pack:plugin" || project.path.contains("ql") || project.path.contains("smoke-test")) { tasks.register('checkPart3') { dependsOn 'check' } } else { tasks.register('checkPart2') { dependsOn 'check' } @@ -256,7 +260,7 @@ allprojects { tasks.register('checkPart1') { dependsOn 'check' } } - tasks.register('functionalTests') { dependsOn 'check'} + tasks.register('functionalTests') { dependsOn 'check' } } /* @@ -281,7 +285,7 @@ allprojects { // :test:framework:test cannot run before and after :server:test return } - tasks.matching { it.name.equals('integTest')}.configureEach {integTestTask -> + tasks.matching { it.name.equals('integTest') }.configureEach { integTestTask -> integTestTask.mustRunAfter tasks.matching { it.name.equals("test") } } @@ -290,7 +294,7 @@ allprojects { Project upstreamProject = dep.dependencyProject if (project.path != upstreamProject?.path) { for (String taskName : ['test', 'integTest']) { - project.tasks.matching { it.name == taskName }.configureEach {task -> + project.tasks.matching { it.name == taskName }.configureEach { task -> task.shouldRunAfter(upstreamProject.tasks.matching { upStreamTask -> upStreamTask.name == taskName }) } } diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 743f64b3b28d3..bcbc73f643298 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -61,11 +61,6 @@ tasks.named('forbiddenApisMain').configure { signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt') } -tasks.named('splitPackagesAudit').configure { - // the client package should be owned by the client, but server has some classes there too - ignoreClasses 'org.elasticsearch.client.*' -} - // we don't have tests now, as HLRC is in the process of being removed tasks.named("test").configure {enabled = false } diff --git a/client/rest-high-level/roles.yml 
b/client/rest-high-level/roles.yml deleted file mode 100644 index d3d0630f43058..0000000000000 --- a/client/rest-high-level/roles.yml +++ /dev/null @@ -1,12 +0,0 @@ -admin: - cluster: - - all - indices: - - names: '*' - privileges: - - all - run_as: [ '*' ] - applications: - - application: '*' - privileges: [ '*' ] - resources: [ '*' ] diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java deleted file mode 100644 index fca1e5d29efaf..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client; - -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.charset.Charset; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; -import java.util.StringJoiner; - -final class RequestConverters { - static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON; - - private RequestConverters() { - // Contains only status utility methods - } - - static Request bulk(BulkRequest bulkRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_bulk"); - - Params parameters = new Params(); - parameters.withTimeout(bulkRequest.timeout()); - parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); - parameters.withPipeline(bulkRequest.pipeline()); - parameters.withRouting(bulkRequest.routing()); - 
// Bulk API only supports newline delimited JSON or Smile. Before executing - // the bulk, we need to check that all requests have the same content-type - // and this content-type is supported by the Bulk API. - XContentType bulkContentType = null; - for (int i = 0; i < bulkRequest.numberOfActions(); i++) { - DocWriteRequest action = bulkRequest.requests().get(i); - - DocWriteRequest.OpType opType = action.opType(); - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType); - - } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = (UpdateRequest) action; - if (updateRequest.doc() != null) { - bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType); - } - if (updateRequest.upsertRequest() != null) { - bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType); - } - } - } - - if (bulkContentType == null) { - bulkContentType = XContentType.JSON; - } - - final byte separator = bulkContentType.xContent().streamSeparator(); - final ContentType requestContentType = createContentType(bulkContentType); - - ByteArrayOutputStream content = new ByteArrayOutputStream(); - for (DocWriteRequest action : bulkRequest.requests()) { - DocWriteRequest.OpType opType = action.opType(); - - try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) { - metadata.startObject(); - { - metadata.startObject(opType.getLowercase()); - if (Strings.hasLength(action.index())) { - metadata.field("_index", action.index()); - } - if (Strings.hasLength(action.id())) { - metadata.field("_id", action.id()); - } - if (Strings.hasLength(action.routing())) { - metadata.field("routing", action.routing()); - } - if (action.version() != Versions.MATCH_ANY) { - metadata.field("version", action.version()); - } - - VersionType versionType = action.versionType(); - if (versionType != VersionType.INTERNAL) { - if (versionType == VersionType.EXTERNAL) { - metadata.field("version_type", "external"); - } else if (versionType == VersionType.EXTERNAL_GTE) { - metadata.field("version_type", "external_gte"); - } - } - - if (action.ifSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - metadata.field("if_seq_no", action.ifSeqNo()); - metadata.field("if_primary_term", action.ifPrimaryTerm()); - } - - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - IndexRequest indexRequest = (IndexRequest) action; - if (Strings.hasLength(indexRequest.getPipeline())) { - metadata.field("pipeline", indexRequest.getPipeline()); - } - } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = (UpdateRequest) action; - if (updateRequest.retryOnConflict() > 0) { - metadata.field("retry_on_conflict", updateRequest.retryOnConflict()); - } - if (updateRequest.fetchSource() != null) { - metadata.field("_source", updateRequest.fetchSource()); - } - } - metadata.endObject(); - } - metadata.endObject(); - - BytesRef metadataSource = BytesReference.bytes(metadata).toBytesRef(); - content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length); - content.write(separator); - } - - BytesRef source = null; - if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { - IndexRequest indexRequest = (IndexRequest) action; - BytesReference indexSource = indexRequest.source(); - XContentType indexXContentType = indexRequest.getContentType(); - - try ( 
- XContentParser parser = XContentHelper.createParser( - /* - * EMPTY and THROW are fine here because we just call - * copyCurrentStructure which doesn't touch the - * registry or deprecation. - */ - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - indexSource, - indexXContentType - ) - ) { - try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) { - builder.copyCurrentStructure(parser); - source = BytesReference.bytes(builder).toBytesRef(); - } - } - } else if (opType == DocWriteRequest.OpType.UPDATE) { - source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, false).toBytesRef(); - } - - if (source != null) { - content.write(source.bytes, source.offset, source.length); - content.write(separator); - } - } - request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); - return request; - } - - static Request index(IndexRequest indexRequest) { - String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; - - String endpoint; - if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) { - endpoint = endpoint(indexRequest.index(), "_create", indexRequest.id()); - } else { - endpoint = endpoint(indexRequest.index(), indexRequest.id()); - } - - Request request = new Request(method, endpoint); - - Params parameters = new Params(); - parameters.withRouting(indexRequest.routing()); - parameters.withTimeout(indexRequest.timeout()); - parameters.withVersion(indexRequest.version()); - parameters.withVersionType(indexRequest.versionType()); - parameters.withIfSeqNo(indexRequest.ifSeqNo()); - parameters.withIfPrimaryTerm(indexRequest.ifPrimaryTerm()); - parameters.withPipeline(indexRequest.getPipeline()); - parameters.withRefreshPolicy(indexRequest.getRefreshPolicy()); - parameters.withWaitForActiveShards(indexRequest.waitForActiveShards()); - parameters.withRequireAlias(indexRequest.isRequireAlias()); - - BytesRef source = indexRequest.source().toBytesRef(); - ContentType contentType = createContentType(indexRequest.getContentType()); - request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(source.bytes, source.offset, source.length, contentType)); - return request; - } - - /** - * Convert a {@linkplain SearchRequest} into a {@linkplain Request}. - * @param searchRequest the request to convert - * @param searchEndpoint the name of the search endpoint. {@literal _search} - * for standard searches and {@literal _rollup_search} for rollup - * searches. 
- */ - static Request search(SearchRequest searchRequest, String searchEndpoint) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchEndpoint)); - - Params params = new Params(); - addSearchRequestParams(params, searchRequest); - - if (searchRequest.source() != null) { - request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); - } - request.addParameters(params.asMap()); - return request; - } - - static void addSearchRequestParams(Params params, SearchRequest searchRequest) { - params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); - params.withRouting(searchRequest.routing()); - params.withPreference(searchRequest.preference()); - if (SearchRequest.DEFAULT_INDICES_OPTIONS.equals(searchRequest.indicesOptions()) == false) { - params.withIndicesOptions(searchRequest.indicesOptions()); - } - params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - if (searchRequest.isCcsMinimizeRoundtrips() != SearchRequest.defaultCcsMinimizeRoundtrips(searchRequest)) { - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); - } - if (searchRequest.getPreFilterShardSize() != null) { - params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); - } - params.withMaxConcurrentShardRequests(searchRequest.getMaxConcurrentShardRequests()); - if (searchRequest.requestCache() != null) { - params.withRequestCache(searchRequest.requestCache()); - } - if (searchRequest.allowPartialSearchResults() != null) { - params.withAllowPartialResults(searchRequest.allowPartialSearchResults()); - } - params.withBatchedReduceSize(searchRequest.getBatchedReduceSize()); - if (searchRequest.scroll() != null) { - params.putParam("scroll", searchRequest.scroll().keepAlive()); - } - } - - static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_search/scroll"); - request.setEntity(createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { - return createEntity(toXContent, xContentType, ToXContent.EMPTY_PARAMS); - } - - static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) - throws IOException { - BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); - return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); - } - - @Deprecated - static String endpoint(String index, String type, String id) { - return new EndpointBuilder().addPathPart(index, type, id).build(); - } - - static String endpoint(String index, String id) { - return new EndpointBuilder().addPathPart(index, "_doc", id).build(); - } - - @Deprecated - static String endpoint(String index, String type, String id, String endpoint) { - return new EndpointBuilder().addPathPart(index, type, id).addPathPartAsIs(endpoint).build(); - } - - static String endpoint(String[] indices, String endpoint) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build(); - } - - @Deprecated - static String endpoint(String[] indices, String[] types, String endpoint) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices) - .addCommaSeparatedPathParts(types) - 
.addPathPartAsIs(endpoint) - .build(); - } - - @Deprecated - static String endpoint(String[] indices, String endpoint, String type) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).addPathPart(type).build(); - } - - /** - * Returns a {@link ContentType} from a given {@link XContentType}. - * - * @param xContentType the {@link XContentType} - * @return the {@link ContentType} - */ - @SuppressForbidden(reason = "Only allowed place to convert a XContentType to a ContentType") - public static ContentType createContentType(final XContentType xContentType) { - return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null); - } - - /** - * Utility class to help with common parameter names and patterns. Wraps - * a {@link Request} and adds the parameters to it directly. - */ - static class Params { - private final Map parameters = new HashMap<>(); - - Params() {} - - Params putParam(String name, String value) { - if (Strings.hasLength(value)) { - parameters.put(name, value); - } - return this; - } - - Params putParam(String key, TimeValue value) { - if (value != null) { - return putParam(key, value.getStringRep()); - } - return this; - } - - Map asMap() { - return parameters; - } - - Params withPipeline(String pipeline) { - return putParam("pipeline", pipeline); - } - - Params withPreference(String preference) { - return putParam("preference", preference); - } - - Params withSearchType(String searchType) { - return putParam("search_type", searchType); - } - - Params withMaxConcurrentShardRequests(int maxConcurrentShardRequests) { - return putParam("max_concurrent_shard_requests", Integer.toString(maxConcurrentShardRequests)); - } - - Params withBatchedReduceSize(int batchedReduceSize) { - return putParam("batched_reduce_size", Integer.toString(batchedReduceSize)); - } - - Params withRequestCache(boolean requestCache) { - return putParam("request_cache", Boolean.toString(requestCache)); - } - - Params withAllowPartialResults(boolean allowPartialSearchResults) { - return putParam("allow_partial_search_results", Boolean.toString(allowPartialSearchResults)); - } - - Params withRefreshPolicy(RefreshPolicy refreshPolicy) { - if (refreshPolicy != RefreshPolicy.NONE) { - return putParam("refresh", refreshPolicy.getValue()); - } - return this; - } - - Params withRouting(String routing) { - return putParam("routing", routing); - } - - Params withTimeout(TimeValue timeout) { - return putParam("timeout", timeout); - } - - Params withVersion(long version) { - if (version != Versions.MATCH_ANY) { - return putParam("version", Long.toString(version)); - } - return this; - } - - Params withVersionType(VersionType versionType) { - if (versionType != VersionType.INTERNAL) { - return putParam("version_type", versionType.name().toLowerCase(Locale.ROOT)); - } - return this; - } - - Params withIfSeqNo(long ifSeqNo) { - if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { - return putParam("if_seq_no", Long.toString(ifSeqNo)); - } - return this; - } - - Params withIfPrimaryTerm(long ifPrimaryTerm) { - if (ifPrimaryTerm != SequenceNumbers.UNASSIGNED_PRIMARY_TERM) { - return putParam("if_primary_term", Long.toString(ifPrimaryTerm)); - } - return this; - } - - Params withWaitForActiveShards(ActiveShardCount activeShardCount) { - return withWaitForActiveShards(activeShardCount, ActiveShardCount.DEFAULT); - } - - Params withWaitForActiveShards(ActiveShardCount activeShardCount, ActiveShardCount defaultActiveShardCount) { - if (activeShardCount != null && 
activeShardCount != defaultActiveShardCount) { - return putParam("wait_for_active_shards", activeShardCount.toString().toLowerCase(Locale.ROOT)); - } - return this; - } - - Params withRequireAlias(boolean requireAlias) { - if (requireAlias) { - return putParam("require_alias", Boolean.toString(requireAlias)); - } - return this; - } - - Params withIndicesOptions(IndicesOptions indicesOptions) { - if (indicesOptions != null) { - withIgnoreUnavailable(indicesOptions.ignoreUnavailable()); - putParam("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices())); - String expandWildcards; - if (indicesOptions.expandWildcardExpressions() == false) { - expandWildcards = "none"; - } else { - StringJoiner joiner = new StringJoiner(","); - if (indicesOptions.expandWildcardsOpen()) { - joiner.add("open"); - } - if (indicesOptions.expandWildcardsClosed()) { - joiner.add("closed"); - } - expandWildcards = joiner.toString(); - } - putParam("expand_wildcards", expandWildcards); - putParam("ignore_throttled", Boolean.toString(indicesOptions.ignoreThrottled())); - } - return this; - } - - Params withIgnoreUnavailable(boolean ignoreUnavailable) { - // Always explicitly place the ignore_unavailable value. - putParam("ignore_unavailable", Boolean.toString(ignoreUnavailable)); - return this; - } - } - - /** - * Ensure that the {@link IndexRequest}'s content type is supported by the Bulk API and that it conforms - * to the current {@link BulkRequest}'s content type (if it's known at the time of this method get called). - * - * @return the {@link IndexRequest}'s content type - */ - static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) { - XContentType requestContentType = indexRequest.getContentType(); - if (requestContentType.canonical() != XContentType.JSON && requestContentType.canonical() != XContentType.SMILE) { - throw new IllegalArgumentException( - "Unsupported content-type found for request with content-type [" - + requestContentType - + "], only JSON and SMILE are supported" - ); - } - if (xContentType == null) { - return requestContentType; - } - if (requestContentType.canonical() != xContentType.canonical()) { - throw new IllegalArgumentException( - "Mismatching content-type found for request with content-type [" - + requestContentType - + "], previous requests have content-type [" - + xContentType - + "]" - ); - } - return xContentType; - } - - /** - * Utility class to build request's endpoint given its parts as strings - */ - static class EndpointBuilder { - - private final StringJoiner joiner = new StringJoiner("/", "/", ""); - - EndpointBuilder addPathPart(String... parts) { - for (String part : parts) { - if (Strings.hasLength(part)) { - joiner.add(encodePart(part)); - } - } - return this; - } - - EndpointBuilder addCommaSeparatedPathParts(String[] parts) { - addPathPart(String.join(",", parts)); - return this; - } - - EndpointBuilder addPathPartAsIs(String... parts) { - for (String part : parts) { - if (Strings.hasLength(part)) { - joiner.add(part); - } - } - return this; - } - - String build() { - return joiner.toString(); - } - - private static String encodePart(String pathPart) { - try { - // encode each part (e.g. 
index, type and id) separately before merging them into the path - // we prepend "/" to the path part to make this path absolute, otherwise there can be issues with - // paths that start with `-` or contain `:` - // the authority must be an empty string and not null, else paths that being with slashes could have them - // misinterpreted as part of the authority. - URI uri = new URI(null, "", "/" + pathPart, null, null); - // manually encode any slash that each part may contain - return uri.getRawPath().substring(1).replaceAll("/", "%2F"); - } catch (URISyntaxException e) { - throw new IllegalArgumentException("Path part [" + pathPart + "] couldn't be encoded", e); - } - } - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java deleted file mode 100644 index b0998957910a2..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ /dev/null @@ -1,1042 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client; - -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder; -import org.elasticsearch.aggregations.bucket.adjacency.ParsedAdjacencyMatrix; -import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; -import org.elasticsearch.aggregations.bucket.histogram.ParsedAutoDateHistogram; -import org.elasticsearch.aggregations.bucket.timeseries.ParsedTimeSeries; -import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; -import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; -import org.elasticsearch.client.analytics.ParsedStringStats; -import org.elasticsearch.client.analytics.ParsedTopMetrics; -import org.elasticsearch.client.analytics.StringStatsAggregationBuilder; -import org.elasticsearch.client.analytics.TopMetricsAggregationBuilder; -import org.elasticsearch.client.core.MainResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.CheckedFunction; -import 
org.elasticsearch.plugins.spi.NamedXContentProvider; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.composite.ParsedComposite; -import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilter; -import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; -import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedVariableWidthHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.VariableWidthHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.missing.ParsedMissing; -import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.nested.ParsedNested; -import org.elasticsearch.search.aggregations.bucket.nested.ParsedReverseNested; -import org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.DateRangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.IpRangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.range.ParsedBinaryRange; -import org.elasticsearch.search.aggregations.bucket.range.ParsedDateRange; -import org.elasticsearch.search.aggregations.bucket.range.ParsedGeoDistance; -import org.elasticsearch.search.aggregations.bucket.range.ParsedRange; -import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler; -import org.elasticsearch.search.aggregations.bucket.sampler.ParsedSampler; -import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms; -import org.elasticsearch.search.aggregations.bucket.terms.LongRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms; -import 
org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantLongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.SignificantLongTerms; -import org.elasticsearch.search.aggregations.bucket.terms.SignificantStringTerms; -import org.elasticsearch.search.aggregations.bucket.terms.StringRareTerms; -import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; -import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.GeoBoundsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; -import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ParsedAvg; -import org.elasticsearch.search.aggregations.metrics.ParsedCardinality; -import org.elasticsearch.search.aggregations.metrics.ParsedExtendedStats; -import org.elasticsearch.search.aggregations.metrics.ParsedGeoBounds; -import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid; -import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles; -import org.elasticsearch.search.aggregations.metrics.ParsedMax; -import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation; -import org.elasticsearch.search.aggregations.metrics.ParsedMin; -import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric; -import org.elasticsearch.search.aggregations.metrics.ParsedStats; -import org.elasticsearch.search.aggregations.metrics.ParsedSum; -import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentileRanks; -import org.elasticsearch.search.aggregations.metrics.ParsedTDigestPercentiles; -import org.elasticsearch.search.aggregations.metrics.ParsedTopHits; -import org.elasticsearch.search.aggregations.metrics.ParsedValueCount; -import org.elasticsearch.search.aggregations.metrics.ParsedWeightedAvg; -import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; -import 
org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedDerivative; -import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; -import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; -import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.search.suggest.completion.CompletionSuggestion; -import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestion; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; -import org.elasticsearch.search.suggest.term.TermSuggestion; -import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; -import org.elasticsearch.xcontent.ContextParser; -import org.elasticsearch.xcontent.DeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.ServiceLoader; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.util.Collections.emptySet; -import static java.util.stream.Collectors.toList; - -/** - * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. The - * {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically when - * closing the {@link RestHighLevelClient} instance that wraps it. - *
- * <p>
- * In case an already existing instance of a low-level REST client needs to be provided, this class can be subclassed and the
- * {@link #RestHighLevelClient(RestClient, CheckedConsumer, List)} constructor can be used.
- * <p>
- * This class can also be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through plugins,
- * or to add support for custom response sections, again added to Elasticsearch through plugins.
- * <p>
- * The majority of the methods in this class come in two flavors, a blocking and an asynchronous version (e.g.
- * {@link #search(SearchRequest, RequestOptions)} and {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)}), where the latter
- * takes an implementation of an {@link ActionListener} as an argument that needs to implement methods that handle successful responses and
- * failure scenarios. Most of the blocking calls can throw an {@link IOException} or an unchecked {@link ElasticsearchException} in the
- * following cases:
- *
- * <ul>
- * <li>an {@link IOException} is usually thrown in case of failing to parse the REST response in the high-level REST client, the request
- * times out or similar cases where there is no response coming back from the Elasticsearch server</li>
- * <li>an {@link ElasticsearchException} is usually thrown in case where the server returns a 4xx or 5xx error code. The high-level client
- * then tries to parse the response body error details into a generic ElasticsearchException and suppresses the original
- * {@link ResponseException}</li>
- * </ul>
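As a concrete illustration of the two flavors described in the Javadoc above, here is a minimal sketch of a blocking and an asynchronous search against the now-removed high-level client. The host, port, and index name are placeholders; the constructor and method signatures are the ones visible in the deleted code in this diff.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

import java.io.IOException;

public class SearchFlavorsExample {
    public static void main(String[] args) throws IOException {
        RestHighLevelClient client = new RestHighLevelClient(
            RestClient.builder(new HttpHost("localhost", 9200, "http")));

        SearchRequest request = new SearchRequest("my-index"); // placeholder index name

        // Blocking flavor: returns the response or throws IOException / ElasticsearchException.
        SearchResponse blocking = client.search(request, RequestOptions.DEFAULT);
        System.out.println(blocking.status());

        // Asynchronous flavor: success and failure are delivered to the ActionListener instead.
        client.searchAsync(request, RequestOptions.DEFAULT, new ActionListener<>() {
            @Override
            public void onResponse(SearchResponse response) {
                System.out.println(response.status());
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        });

        // In real code, close() should only run after outstanding async requests complete.
        client.close();
    }
}
```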
- * - * @deprecated The High Level Rest Client is deprecated in favor of the - * - * Elasticsearch Java API Client - */ -@Deprecated(since = "7.16.0", forRemoval = true) -@SuppressWarnings("removal") -public class RestHighLevelClient implements Closeable { - - private static final Logger logger = LogManager.getLogger(RestHighLevelClient.class); - /** - * Environment variable determining whether to send the 7.x compatibility header - */ - public static final String API_VERSIONING_ENV_VARIABLE = "ELASTIC_CLIENT_APIVERSIONING"; - - // To be called using performClientRequest and performClientRequestAsync to ensure version compatibility check - private final RestClient client; - private final XContentParserConfiguration parserConfig; - private final CheckedConsumer doClose; - private final boolean useAPICompatibility; - - /** Do not access directly but through getVersionValidationFuture() */ - private volatile ListenableFuture> versionValidationFuture; - - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the - * {@link RestClient} to be used to perform requests. - */ - public RestHighLevelClient(RestClientBuilder restClientBuilder) { - this(restClientBuilder.build(), RestClient::close, Collections.emptyList()); - } - - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and - * a list of entries that allow to parse custom response sections added to Elasticsearch through plugins. - * This constructor can be called by subclasses in case an externally created low-level REST client needs to be provided. - * The consumer argument allows to control what needs to be done when the {@link #close()} method is called. - * Also subclasses can provide parsers for custom response sections added to Elasticsearch through plugins. - */ - protected RestHighLevelClient( - RestClient restClient, - CheckedConsumer doClose, - List namedXContentEntries - ) { - this(restClient, doClose, namedXContentEntries, null); - } - - /** - * Creates a {@link RestHighLevelClient} given the low level {@link RestClient} that it should use to perform requests and - * a list of entries that allow to parse custom response sections added to Elasticsearch through plugins. - * This constructor can be called by subclasses in case an externally created low-level REST client needs to be provided. - * The consumer argument allows to control what needs to be done when the {@link #close()} method is called. - * Also subclasses can provide parsers for custom response sections added to Elasticsearch through plugins. - */ - private RestHighLevelClient( - RestClient restClient, - CheckedConsumer doClose, - List namedXContentEntries, - Boolean useAPICompatibility - ) { - this.client = Objects.requireNonNull(restClient, "restClient must not be null"); - this.doClose = Objects.requireNonNull(doClose, "doClose consumer must not be null"); - NamedXContentRegistry registry = new NamedXContentRegistry( - Stream.of(getDefaultNamedXContents().stream(), getProvidedNamedXContents().stream(), namedXContentEntries.stream()) - .flatMap(Function.identity()) - .collect(toList()) - ); - /* - * Ignores deprecation warnings. This is appropriate because it is only - * used to parse responses from Elasticsearch. Any deprecation warnings - * emitted there just mean that you are talking to an old version of - * Elasticsearch. There isn't anything you can do about the deprecation. 
- */ - this.parserConfig = XContentParserConfiguration.EMPTY.withRegistry(registry) - .withDeprecationHandler(DeprecationHandler.IGNORE_DEPRECATIONS); - if (useAPICompatibility == null && "true".equals(System.getenv(API_VERSIONING_ENV_VARIABLE))) { - this.useAPICompatibility = true; - } else { - this.useAPICompatibility = Boolean.TRUE.equals(useAPICompatibility); - } - } - - /** - * Returns the low-level client that the current high-level client instance is using to perform requests - */ - public final RestClient getLowLevelClient() { - return client; - } - - public final XContentParserConfiguration getParserConfig() { - return parserConfig; - } - - @Override - public final void close() throws IOException { - doClose.accept(client); - } - - /** - * Asynchronously executes a bulk request using the Bulk API. - * See Bulk API on elastic.co - * @param bulkRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener listener) { - return performRequestAsyncAndParseEntity( - bulkRequest, - RequestConverters::bulk, - options, - BulkResponse::fromXContent, - listener, - emptySet() - ); - } - - /** - * Index a document using the Index API. - * See Index API on elastic.co - * @param indexRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final IndexResponse index(IndexRequest indexRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); - } - - /** - * Executes a search request using the Search API. - * See Search API on elastic.co - * @param searchRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final SearchResponse search(SearchRequest searchRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - searchRequest, - r -> RequestConverters.search(r, "_search"), - options, - SearchResponse::fromXContent, - emptySet() - ); - } - - /** - * Asynchronously executes a search using the Search API. - * See Search API on elastic.co - * @param searchRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable searchAsync(SearchRequest searchRequest, RequestOptions options, ActionListener listener) { - return performRequestAsyncAndParseEntity( - searchRequest, - r -> RequestConverters.search(r, "_search"), - options, - SearchResponse::fromXContent, - listener, - emptySet() - ); - } - - /** - * Executes a search using the Search Scroll API. - * See Search - * Scroll API on elastic.co - * @param searchScrollRequest the request - * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - searchScrollRequest, - RequestConverters::searchScroll, - options, - SearchResponse::fromXContent, - emptySet() - ); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. - */ - @Deprecated - private Resp performRequestAndParseEntity( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - Set ignores - ) throws IOException { - return performRequest(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), ignores); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. - */ - @Deprecated - private Resp performRequest( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores - ) throws IOException { - ActionRequestValidationException validationException = request.validate(); - if (validationException != null && validationException.validationErrors().isEmpty() == false) { - throw validationException; - } - return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); - } - - /** - * Provides common functionality for performing a request. - */ - private Resp internalPerformRequest( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - Set ignores - ) throws IOException { - Request req = requestConverter.apply(request); - req.setOptions(options); - Response response; - try { - response = performClientRequest(req); - } catch (ResponseException e) { - if (ignores.contains(e.getResponse().getStatusLine().getStatusCode())) { - try { - return responseConverter.apply(e.getResponse()); - } catch (Exception innerException) { - // the exception is ignored as we now try to parse the response as an error. - // this covers cases like get where 404 can either be a valid document not found response, - // or an error for which parsing is completely different. We try to consider the 404 response as a valid one - // first. If parsing of the response breaks, we fall back to parsing it as an error. - throw parseResponseException(e); - } - } - throw parseResponseException(e); - } - - try { - return responseConverter.apply(response); - } catch (Exception e) { - throw new IOException("Unable to parse response body for " + response, e); - } - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. 
- * @return Cancellable instance that may be used to cancel the request - */ - @Deprecated - private Cancellable performRequestAsyncAndParseEntity( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener listener, - Set ignores - ) { - return performRequestAsync( - request, - requestConverter, - options, - response -> parseEntity(response.getEntity(), entityParser), - listener, - ignores - ); - } - - /** - * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation - * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. - * @return Cancellable instance that may be used to cancel the request - */ - @Deprecated - private Cancellable performRequestAsync( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, - Set ignores - ) { - ActionRequestValidationException validationException = request.validate(); - if (validationException != null && validationException.validationErrors().isEmpty() == false) { - listener.onFailure(validationException); - return Cancellable.NO_OP; - } - return internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); - } - - /** - * Provides common functionality for asynchronously performing a request. - * @return Cancellable instance that may be used to cancel the request - */ - private Cancellable internalPerformRequestAsync( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, - Set ignores - ) { - Request req; - try { - req = requestConverter.apply(request); - } catch (Exception e) { - listener.onFailure(e); - return Cancellable.NO_OP; - } - req.setOptions(options); - - ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); - return performClientRequestAsync(req, responseListener); - } - - private ResponseListener wrapResponseListener( - CheckedFunction responseConverter, - ActionListener actionListener, - Set ignores - ) { - return new ResponseListener() { - @Override - public void onSuccess(Response response) { - try { - actionListener.onResponse(responseConverter.apply(response)); - } catch (Exception e) { - IOException ioe = new IOException("Unable to parse response body for " + response, e); - onFailure(ioe); - } - } - - @Override - public void onFailure(Exception exception) { - if (exception instanceof ResponseException responseException) { - Response response = responseException.getResponse(); - if (ignores.contains(response.getStatusLine().getStatusCode())) { - try { - actionListener.onResponse(responseConverter.apply(response)); - } catch (Exception innerException) { - // the exception is ignored as we now try to parse the response as an error. - // this covers cases like get where 404 can either be a valid document not found response, - // or an error for which parsing is completely different. We try to consider the 404 response as a valid one - // first. If parsing of the response breaks, we fall back to parsing it as an error. 
- actionListener.onFailure(parseResponseException(responseException)); - } - } else { - actionListener.onFailure(parseResponseException(responseException)); - } - } else { - actionListener.onFailure(exception); - } - } - }; - } - - /** - * Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}. - * If a response body was returned, tries to parse it as an error returned from Elasticsearch. - * If no response body was returned or anything goes wrong while parsing the error, returns a new {@link ElasticsearchStatusException} - * that wraps the original {@link ResponseException}. The potential exception obtained while parsing is added to the returned - * exception as a suppressed exception. This method is guaranteed to not throw any exception eventually thrown while parsing. - */ - private ElasticsearchStatusException parseResponseException(ResponseException responseException) { - Response response = responseException.getResponse(); - HttpEntity entity = response.getEntity(); - ElasticsearchStatusException elasticsearchException; - RestStatus restStatus = RestStatus.fromCode(response.getStatusLine().getStatusCode()); - - if (entity == null) { - elasticsearchException = new ElasticsearchStatusException(responseException.getMessage(), restStatus, responseException); - } else { - try { - elasticsearchException = parseEntity(entity, RestResponse::errorFromXContent); - elasticsearchException.addSuppressed(responseException); - } catch (Exception e) { - elasticsearchException = new ElasticsearchStatusException("Unable to parse response body", restStatus, responseException); - elasticsearchException.addSuppressed(e); - } - } - return elasticsearchException; - } - - private Resp parseEntity(final HttpEntity entity, final CheckedFunction entityParser) - throws IOException { - if (entity == null) { - throw new IllegalStateException("Response body expected but not returned"); - } - if (entity.getContentType() == null) { - throw new IllegalStateException("Elasticsearch didn't return the [Content-Type] header, unable to parse response body"); - } - XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); - if (xContentType == null) { - throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); - } - try (XContentParser parser = xContentType.xContent().createParser(parserConfig, entity.getContent())) { - return entityParser.apply(parser); - } - } - - private enum EntityType { - JSON() { - @Override - public String header() { - return "application/json"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+json; compatible-with=7"; - } - }, - NDJSON() { - @Override - public String header() { - return "application/x-ndjson"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+x-ndjson; compatible-with=7"; - } - }, - STAR() { - @Override - public String header() { - return "application/*"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+json; compatible-with=7"; - } - }, - YAML() { - @Override - public String header() { - return "application/yaml"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+yaml; compatible-with=7"; - } - }, - SMILE() { - @Override - public String header() { - return "application/smile"; - } - - @Override - public String compatibleHeader() { - return 
"application/vnd.elasticsearch+smile; compatible-with=7"; - } - }, - CBOR() { - @Override - public String header() { - return "application/cbor"; - } - - @Override - public String compatibleHeader() { - return "application/vnd.elasticsearch+cbor; compatible-with=7"; - } - }; - - public abstract String header(); - - public abstract String compatibleHeader(); - - @Override - public String toString() { - return header(); - } - } - - private Cancellable performClientRequestAsync(Request request, ResponseListener listener) { - // Add compatibility request headers if compatibility mode has been enabled - if (this.useAPICompatibility) { - modifyRequestForCompatibility(request); - } - - ListenableFuture> versionCheck = getVersionValidationFuture(); - - // Create a future that tracks cancellation of this method's result and forwards cancellation to the actual LLRC request. - CompletableFuture cancellationForwarder = new CompletableFuture<>(); - Cancellable result = new Cancellable() { - @Override - public void cancel() { - // Raise the flag by completing the future - FutureUtils.cancel(cancellationForwarder); - } - - @Override - void runIfNotCancelled(Runnable runnable) { - if (cancellationForwarder.isCancelled()) { - throw newCancellationException(); - } - runnable.run(); - } - }; - - // Send the request after we have done the version compatibility check. Note that if it has already happened, the listener will - // be called immediately on the same thread with no asynchronous scheduling overhead. - versionCheck.addListener(new ActionListener<>() { - @Override - public void onResponse(Optional validation) { - if (validation.isPresent() == false) { - // Send the request and propagate cancellation - Cancellable call = client.performRequestAsync(request, listener); - cancellationForwarder.whenComplete((r, t) -> - // Forward cancellation to the actual request (no need to check parameters as the - // only way for cancellationForwarder to be completed is by being cancelled). - call.cancel()); - } else { - // Version validation wasn't successful, fail the request with the validation result. - listener.onFailure(new ElasticsearchException(validation.get())); - } - } - - @Override - public void onFailure(Exception e) { - // Propagate validation request failure. This will be transient since `getVersionValidationFuture` clears the validation - // future if the request fails, leading to retries at the next HLRC request (see comments below). - listener.onFailure(e); - } - }); - - return result; - } - - /** - * Go through all the request's existing headers, looking for {@code headerName} headers and if they exist, - * changing them to use version compatibility. 
If no request headers are changed, modify the entity type header if appropriate - */ - private boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHeader, String headerName) { - // Modify any existing "Content-Type" headers on the request to use the version compatibility, if available - boolean contentTypeModified = false; - for (Header header : new ArrayList<>(newOptions.getHeaders())) { - if (headerName.equalsIgnoreCase(header.getName()) == false) { - continue; - } - contentTypeModified = contentTypeModified || modifyHeader(newOptions, header, headerName); - } - - // If there were no request-specific headers, modify the request entity's header to be compatible - if (entityHeader != null && contentTypeModified == false) { - contentTypeModified = modifyHeader(newOptions, entityHeader, headerName); - } - - return contentTypeModified; - } - - /** - * Modify the given header to be version compatible, if necessary. - * Returns true if a modification was made, false otherwise. - */ - private boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String headerName) { - for (EntityType type : EntityType.values()) { - final String headerValue = header.getValue(); - if (headerValue.startsWith(type.header())) { - String newHeaderValue = headerValue.replace(type.header(), type.compatibleHeader()); - newOptions.removeHeader(header.getName()); - newOptions.addHeader(headerName, newHeaderValue); - return true; - } - } - return false; - } - - /** - * Make all necessary changes to support API compatibility for the given request. This includes - * modifying the "Content-Type" and "Accept" headers if present, or modifying the header based - * on the request's entity type. - */ - private void modifyRequestForCompatibility(Request request) { - final Header entityHeader = request.getEntity() == null ? null : request.getEntity().getContentType(); - final RequestOptions.Builder newOptions = request.getOptions().toBuilder(); - - addCompatibilityFor(newOptions, entityHeader, "Content-Type"); - if (request.getOptions().containsHeader("Accept")) { - addCompatibilityFor(newOptions, entityHeader, "Accept"); - } else { - // There is no entity, and no existing accept header, but we still need one - // with compatibility, so use the compatible JSON (default output) format - newOptions.addHeader("Accept", EntityType.JSON.compatibleHeader()); - } - request.setOptions(newOptions); - } - - private Response performClientRequest(Request request) throws IOException { - // Add compatibility request headers if compatibility mode has been enabled - if (this.useAPICompatibility) { - modifyRequestForCompatibility(request); - } - - Optional versionValidation; - try { - final var future = new PlainActionFuture>(); - getVersionValidationFuture().addListener(future); - versionValidation = future.get(); - } catch (InterruptedException | ExecutionException e) { - // Unlikely to happen - throw new ElasticsearchException(e); - } - - if (versionValidation.isPresent() == false) { - return client.performRequest(request); - } else { - throw new ElasticsearchException(versionValidation.get()); - } - } - - /** - * Returns a future that asynchronously validates the Elasticsearch product version. Its result is an optional string: if empty then - * validation was successful, if present it contains the validation error. API requests should be chained to this future and check - * the validation result before going further. - *
- * <p>
- * This future is a memoization of the first successful request to the "/" endpoint and the subsequent compatibility check
- * ({@see #versionValidationFuture}). Further client requests reuse its result.
- * <p>
- * If the version check request fails (e.g. network error), {@link #versionValidationFuture} is cleared so that a new validation - * request is sent at the next HLRC request. This allows retries to happen while avoiding a busy retry loop (LLRC retries on the node - * pool still happen). - */ - private ListenableFuture> getVersionValidationFuture() { - ListenableFuture> currentFuture = this.versionValidationFuture; - if (currentFuture != null) { - return currentFuture; - } else { - synchronized (this) { - // Re-check in synchronized block - currentFuture = this.versionValidationFuture; - if (currentFuture != null) { - return currentFuture; - } - ListenableFuture> future = new ListenableFuture<>(); - this.versionValidationFuture = future; - - // Asynchronously call the info endpoint and complete the future with the version validation result. - Request req = new Request("GET", "/"); - // These status codes are nominal in the context of product version verification - req.addParameter("ignore", "401,403"); - client.performRequestAsync(req, new ResponseListener() { - @Override - public void onSuccess(Response response) { - Optional validation; - try { - validation = getVersionValidation(response); - } catch (Exception e) { - logger.error("Failed to parse info response", e); - validation = Optional.of( - "Failed to parse info response. Check logs for detailed information - " + e.getMessage() - ); - } - future.onResponse(validation); - } - - @Override - public void onFailure(Exception exception) { - - // Fail the requests (this one and the ones waiting for it) and clear the future - // so that we retry the next time the client executes a request. - versionValidationFuture = null; - future.onFailure(exception); - } - }); - - return future; - } - } - } - - /** - * Validates that the response info() is a compatible Elasticsearch version. - * - * @return an optional string. If empty, version is compatible. Otherwise, it's the message to return to the application. - */ - private Optional getVersionValidation(Response response) throws IOException { - // Let requests go through if the client doesn't have permissions for the info endpoint. - int statusCode = response.getStatusLine().getStatusCode(); - if (statusCode == 401 || statusCode == 403) { - return Optional.empty(); - } - - MainResponse mainResponse; - try { - mainResponse = parseEntity(response.getEntity(), MainResponse::fromXContent); - } catch (ResponseException e) { - throw parseResponseException(e); - } - - String version = mainResponse.getVersion().getNumber(); - if (Strings.hasLength(version) == false) { - return Optional.of("Missing version.number in info response"); - } - - String[] parts = version.split("\\."); - if (parts.length < 2) { - return Optional.of("Wrong version.number format in info response"); - } - - int major = Integer.parseInt(parts[0]); - int minor = Integer.parseInt(parts[1]); - - if (major < 6) { - return Optional.of("Elasticsearch version 6 or more is required"); - } - - if (major == 6 || (major == 7 && minor < 14)) { - if ("You Know, for Search".equalsIgnoreCase(mainResponse.getTagline()) == false) { - return Optional.of("Invalid or missing tagline [" + mainResponse.getTagline() + "]"); - } - - return Optional.empty(); - } - - String header = response.getHeader("X-Elastic-Product"); - if (header == null) { - return Optional.of( - "Missing [X-Elastic-Product] header. Please check that you are connecting to an Elasticsearch " - + "instance, and that any networking filters are preserving that header." 
- ); - } - - if ("Elasticsearch".equals(header) == false) { - return Optional.of("Invalid value [" + header + "] for [X-Elastic-Product] header."); - } - - return Optional.empty(); - } - - private static List getDefaultNamedXContents() { - Map> map = new HashMap<>(); - map.put(CardinalityAggregationBuilder.NAME, (p, c) -> ParsedCardinality.fromXContent(p, (String) c)); - map.put(InternalHDRPercentiles.NAME, (p, c) -> ParsedHDRPercentiles.fromXContent(p, (String) c)); - map.put(InternalHDRPercentileRanks.NAME, (p, c) -> ParsedHDRPercentileRanks.fromXContent(p, (String) c)); - map.put(InternalTDigestPercentiles.NAME, (p, c) -> ParsedTDigestPercentiles.fromXContent(p, (String) c)); - map.put(InternalTDigestPercentileRanks.NAME, (p, c) -> ParsedTDigestPercentileRanks.fromXContent(p, (String) c)); - map.put(PercentilesBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedPercentilesBucket.fromXContent(p, (String) c)); - map.put(MedianAbsoluteDeviationAggregationBuilder.NAME, (p, c) -> ParsedMedianAbsoluteDeviation.fromXContent(p, (String) c)); - map.put(MinAggregationBuilder.NAME, (p, c) -> ParsedMin.fromXContent(p, (String) c)); - map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c)); - map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c)); - map.put(AvgAggregationBuilder.NAME, (p, c) -> ParsedAvg.fromXContent(p, (String) c)); - map.put(WeightedAvgAggregationBuilder.NAME, (p, c) -> ParsedWeightedAvg.fromXContent(p, (String) c)); - map.put(ValueCountAggregationBuilder.NAME, (p, c) -> ParsedValueCount.fromXContent(p, (String) c)); - map.put(InternalSimpleValue.NAME, (p, c) -> ParsedSimpleValue.fromXContent(p, (String) c)); - map.put(DerivativePipelineAggregationBuilder.NAME, (p, c) -> ParsedDerivative.fromXContent(p, (String) c)); - map.put(InternalBucketMetricValue.NAME, (p, c) -> ParsedBucketMetricValue.fromXContent(p, (String) c)); - map.put(StatsAggregationBuilder.NAME, (p, c) -> ParsedStats.fromXContent(p, (String) c)); - map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c)); - map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c)); - map.put(ExtendedStatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c)); - map.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c)); - map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); - map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); - map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c)); - map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c)); - map.put(VariableWidthHistogramAggregationBuilder.NAME, (p, c) -> ParsedVariableWidthHistogram.fromXContent(p, (String) c)); - map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c)); - map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c)); - map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c)); - map.put(LongRareTerms.NAME, (p, c) -> ParsedLongRareTerms.fromXContent(p, (String) c)); - map.put(StringRareTerms.NAME, (p, c) -> ParsedStringRareTerms.fromXContent(p, (String) c)); - map.put(MissingAggregationBuilder.NAME, (p, c) -> ParsedMissing.fromXContent(p, (String) c)); - 
map.put(NestedAggregationBuilder.NAME, (p, c) -> ParsedNested.fromXContent(p, (String) c)); - map.put(ReverseNestedAggregationBuilder.NAME, (p, c) -> ParsedReverseNested.fromXContent(p, (String) c)); - map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c)); - map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); - map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); - map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); - map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c)); - map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c)); - map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c)); - map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c)); - map.put(FiltersAggregationBuilder.NAME, (p, c) -> ParsedFilters.fromXContent(p, (String) c)); - map.put(AdjacencyMatrixAggregationBuilder.NAME, (p, c) -> ParsedAdjacencyMatrix.fromXContent(p, (String) c)); - map.put(SignificantLongTerms.NAME, (p, c) -> ParsedSignificantLongTerms.fromXContent(p, (String) c)); - map.put(SignificantStringTerms.NAME, (p, c) -> ParsedSignificantStringTerms.fromXContent(p, (String) c)); - map.put(ScriptedMetricAggregationBuilder.NAME, (p, c) -> ParsedScriptedMetric.fromXContent(p, (String) c)); - map.put(IpRangeAggregationBuilder.NAME, (p, c) -> ParsedBinaryRange.fromXContent(p, (String) c)); - map.put(TopHitsAggregationBuilder.NAME, (p, c) -> ParsedTopHits.fromXContent(p, (String) c)); - map.put(CompositeAggregationBuilder.NAME, (p, c) -> ParsedComposite.fromXContent(p, (String) c)); - map.put(StringStatsAggregationBuilder.NAME, (p, c) -> ParsedStringStats.PARSER.parse(p, (String) c)); - map.put(TopMetricsAggregationBuilder.NAME, (p, c) -> ParsedTopMetrics.PARSER.parse(p, (String) c)); - map.put(TimeSeriesAggregationBuilder.NAME, (p, c) -> ParsedTimeSeries.fromXContent(p, (String) (c))); - List entries = map.entrySet() - .stream() - .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue())) - .collect(Collectors.toList()); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(TermSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> TermSuggestion.fromXContent(parser, (String) context) - ) - ); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(PhraseSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> PhraseSuggestion.fromXContent(parser, (String) context) - ) - ); - entries.add( - new NamedXContentRegistry.Entry( - Suggest.Suggestion.class, - new ParseField(CompletionSuggestionBuilder.SUGGESTION_NAME), - (parser, context) -> CompletionSuggestion.fromXContent(parser, (String) context) - ) - ); - return entries; - } - - /** - * Loads and returns the {@link NamedXContentRegistry.Entry} parsers provided by plugins. 
- */ - private static List getProvidedNamedXContents() { - List entries = new ArrayList<>(); - for (NamedXContentProvider service : ServiceLoader.load(NamedXContentProvider.class)) { - entries.addAll(service.getNamedXContentParsers()); - } - return entries; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java deleted file mode 100644 index b7635f7054299..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.client; - -import java.util.Optional; - -/** - * Defines a validation layer for Requests. - */ -public interface Validatable { - - Validatable EMPTY = new Validatable() { - }; - - /** - * Perform validation. This method does not have to be overridden in the event that no validation needs to be done, - * or the validation was done during object construction time. A {@link ValidationException} that is not null is - * assumed to contain validation errors and will be thrown. - * - * @return An {@link Optional} {@link ValidationException} that contains a list of validation errors. - */ - default Optional validate() { - return Optional.empty(); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java deleted file mode 100644 index d5701c5723096..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.client; - -import org.elasticsearch.core.Nullable; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Encapsulates an accumulation of validation errors - */ -public class ValidationException extends IllegalArgumentException { - - /** - * Creates {@link ValidationException} instance initialized with given error messages. - * @param error the errors to add - * @return {@link ValidationException} instance - */ - public static ValidationException withError(String... error) { - return withErrors(Arrays.asList(error)); - } - - /** - * Creates {@link ValidationException} instance initialized with given error messages. 
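To make the deleted validation layer concrete, here is a hypothetical request type sketched against the Validatable and ValidationException classes removed above. SampleRequest and its index field are invented for this illustration; only Validatable's default validate() contract and the static ValidationException.withError helper come from the deleted sources.

```java
import java.util.Optional;

// Hypothetical request class; the "index" field and its check are invented for this sketch.
class SampleRequest implements Validatable {
    private final String index;

    SampleRequest(String index) {
        this.index = index;
    }

    @Override
    public Optional<ValidationException> validate() {
        if (index == null || index.isEmpty()) {
            // withError(...) builds a ValidationException carrying the given message.
            return Optional.of(ValidationException.withError("index must not be empty"));
        }
        return Optional.empty(); // no validation errors
    }
}
```

Per the Validatable Javadoc above, a non-empty result was treated as a set of validation errors and thrown, so such a request would have been rejected client-side before any HTTP call was made.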
- * @param errors the list of errors to add - * @return {@link ValidationException} instance - */ - public static ValidationException withErrors(List errors) { - ValidationException e = new ValidationException(); - for (String error : errors) { - e.addValidationError(error); - } - return e; - } - - private final List validationErrors = new ArrayList<>(); - - /** - * Add a new validation error to the accumulating validation errors - * @param error the error to add - */ - public void addValidationError(final String error) { - validationErrors.add(error); - } - - /** - * Adds validation errors from an existing {@link ValidationException} to - * the accumulating validation errors - * @param exception the {@link ValidationException} to add errors from - */ - public final void addValidationErrors(final @Nullable ValidationException exception) { - if (exception != null) { - for (String error : exception.validationErrors()) { - addValidationError(error); - } - } - } - - /** - * Returns the validation errors accumulated - */ - public final List validationErrors() { - return validationErrors; - } - - @Override - public final String getMessage() { - StringBuilder sb = new StringBuilder(); - sb.append("Validation Failed: "); - int index = 0; - for (String error : validationErrors) { - sb.append(++index).append(": ").append(error).append(";"); - } - return sb.toString(); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java deleted file mode 100644 index bf7b1a665e098..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainResponse.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.Objects; - -public class MainResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - MainResponse.class.getName(), - true, - args -> { - return new MainResponse((String) args[0], (Version) args[1], (String) args[2], (String) args[3], (String) args[4]); - } - ); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), Version.PARSER, new ParseField("version")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_name")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_uuid")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("tagline")); - - } - - private final String nodeName; - private final Version version; - private final String clusterName; - private final String clusterUuid; - private final String tagline; - - public MainResponse(String nodeName, Version version, String clusterName, String clusterUuid, String tagline) { - this.nodeName = nodeName; - this.version = version; - this.clusterName = clusterName; - this.clusterUuid = clusterUuid; - this.tagline = tagline; - } - - public String getNodeName() { - return nodeName; - } - - public Version getVersion() { - return version; - } - - public String getClusterName() { - return clusterName; - } - - public String getClusterUuid() { - return clusterUuid; - } - - public String getTagline() { - return tagline; - } - - public static MainResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - MainResponse that = (MainResponse) o; - return nodeName.equals(that.nodeName) - && version.equals(that.version) - && clusterName.equals(that.clusterName) - && clusterUuid.equals(that.clusterUuid) - && tagline.equals(that.tagline); - } - - @Override - public int hashCode() { - return Objects.hash(nodeName, version, clusterName, clusterUuid, tagline); - } - - public static class Version { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - Version.class.getName(), - true, - args -> { - return new Version( - (String) args[0], - (String) args[1], - (String) args[2], - (String) args[3], - (String) args[4], - (Boolean) args[5], - (String) args[6], - (String) args[7], - (String) args[8] - ); - } - ); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("number")); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("build_flavor")); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("build_type")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("build_hash")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("build_date")); - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), new ParseField("build_snapshot")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("lucene_version")); - 
PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("minimum_wire_compatibility_version")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("minimum_index_compatibility_version")); - } - private final String number; - private final String buildFlavor; - private final String buildType; - private final String buildHash; - private final String buildDate; - private final boolean isSnapshot; - private final String luceneVersion; - private final String minimumWireCompatibilityVersion; - private final String minimumIndexCompatibilityVersion; - - public Version( - String number, - String buildFlavor, - String buildType, - String buildHash, - String buildDate, - boolean isSnapshot, - String luceneVersion, - String minimumWireCompatibilityVersion, - String minimumIndexCompatibilityVersion - ) { - this.number = number; - this.buildFlavor = buildFlavor; - this.buildType = buildType; - this.buildHash = buildHash; - this.buildDate = buildDate; - this.isSnapshot = isSnapshot; - this.luceneVersion = luceneVersion; - this.minimumWireCompatibilityVersion = minimumWireCompatibilityVersion; - this.minimumIndexCompatibilityVersion = minimumIndexCompatibilityVersion; - } - - public String getNumber() { - return number; - } - - public String getBuildFlavor() { - return buildFlavor; - } - - public String getBuildType() { - return buildType; - } - - public String getBuildHash() { - return buildHash; - } - - public String getBuildDate() { - return buildDate; - } - - public boolean isSnapshot() { - return isSnapshot; - } - - public String getLuceneVersion() { - return luceneVersion; - } - - public String getMinimumWireCompatibilityVersion() { - return minimumWireCompatibilityVersion; - } - - public String getMinimumIndexCompatibilityVersion() { - return minimumIndexCompatibilityVersion; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Version version = (Version) o; - return isSnapshot == version.isSnapshot - && number.equals(version.number) - && Objects.equals(buildFlavor, version.buildFlavor) - && Objects.equals(buildType, version.buildType) - && buildHash.equals(version.buildHash) - && buildDate.equals(version.buildDate) - && luceneVersion.equals(version.luceneVersion) - && minimumWireCompatibilityVersion.equals(version.minimumWireCompatibilityVersion) - && minimumIndexCompatibilityVersion.equals(version.minimumIndexCompatibilityVersion); - } - - @Override - public int hashCode() { - return Objects.hash( - number, - buildFlavor, - buildType, - buildHash, - buildDate, - isSnapshot, - luceneVersion, - minimumWireCompatibilityVersion, - minimumIndexCompatibilityVersion - ); - } - } -} diff --git a/client/rest-high-level/testnode.crt b/client/rest-high-level/testnode.crt deleted file mode 100644 index 08c160bcea5ff..0000000000000 --- a/client/rest-high-level/testnode.crt +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID0zCCArugAwIBAgIJALi5bDfjMszLMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNV -BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3Rp -Y3NlYXJjaCBUZXN0IE5vZGUwHhcNMTUwOTIzMTg1MjU3WhcNMTkwOTIyMTg1MjU3 -WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxIDAeBgNV -BAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEA3rGZ1QbsW0+MuyrSLmMfDFKtLBkIFW8V0gRuurFg1PUKKNR1 -Mq2tMVwjjYETAU/UY0iKZOzjgvYPKhDTYBTte/WHR1ZK4CYVv7TQX/gtFQG/ge/c -7u0sLch9p7fbd+/HZiLS/rBEZDIohvgUvzvnA8+OIYnw4kuxKo/5iboAIS41klMg 
-/lATm8V71LMY68inht71/ZkQoAHKgcR9z4yNYvQ1WqKG8DG8KROXltll3sTrKbl5 -zJhn660es/1ZnR6nvwt6xnSTl/mNHMjkfv1bs4rJ/py3qPxicdoSIn/KyojUcgHV -F38fuAy2CQTdjVG5fWj9iz+mQvLm3+qsIYQdFwIDAQABo4G/MIG8MAkGA1UdEwQC -MAAwHQYDVR0OBBYEFEMMWLWQi/g83PzlHYqAVnty5L7HMIGPBgNVHREEgYcwgYSC -CWxvY2FsaG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghds -b2NhbGhvc3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5s -b2NhbGRvbWFpbjaHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL -BQADggEBAMjGGXT8Nt1tbl2GkiKtmiuGE2Ej66YuZ37WSJViaRNDVHLlg87TCcHe -k2rdO+6sFqQbbzEfwQ05T7xGmVu7tm54HwKMRugoQ3wct0bQC5wEWYN+oMDvSyO6 -M28mZwWb4VtR2IRyWP+ve5DHwTM9mxWa6rBlGzsQqH6YkJpZojzqk/mQTug+Y8aE -mVoqRIPMHq9ob+S9qd5lp09+MtYpwPfTPx/NN+xMEooXWW/ARfpGhWPkg/FuCu4z -1tFmCqHgNcWirzMm3dQpF78muE9ng6OB2MXQwL4VgnVkxmlZNHbkR2v/t8MyZJxC -y4g6cTMM3S/UMt5/+aIB2JAuMKyuD+A= ------END CERTIFICATE----- diff --git a/client/rest-high-level/testnode.jks b/client/rest-high-level/testnode.jks deleted file mode 100644 index ebe6146124e8f..0000000000000 Binary files a/client/rest-high-level/testnode.jks and /dev/null differ diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 7154a2be5bbd8..ed087bef0ac76 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -87,6 +87,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.singletonList; +import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; /** * Client that connects to an Elasticsearch cluster through HTTP. @@ -106,6 +107,9 @@ * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format. 
*/ public class RestClient implements Closeable { + + public static final String IGNORE_RESPONSE_CODES_PARAM = "ignore"; + private static final Log logger = LogFactory.getLog(RestClient.class); private final CloseableHttpAsyncClient client; @@ -780,8 +784,8 @@ private class InternalRequest { this.request = request; Map<String, String> params = new HashMap<>(request.getParameters()); params.putAll(request.getOptions().getParameters()); - // ignore is a special parameter supported by the clients, shouldn't be sent to es - String ignoreString = params.remove("ignore"); + // IGNORE_RESPONSE_CODES_PARAM is a special parameter supported by the clients, shouldn't be sent to es + String ignoreString = params.remove(IGNORE_RESPONSE_CODES_PARAM); this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod()); URI uri = buildUri(pathPrefix, request.getEndpoint(), params); this.httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity(), compressionEnabled); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index a1c4d3fab076a..10d24242ae620 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -275,6 +275,7 @@ public void testErrorStatusCodes() throws Exception { try { Request request = new Request(method, "/" + errorStatusCode); if (false == ignoreParam.isEmpty()) { + // literal "ignore" rather than IGNORE_RESPONSE_CODES_PARAM since this is something on which callers might rely request.addParameter("ignore", ignoreParam); } Response response = restClient.performRequest(request); @@ -568,6 +569,7 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { if (randomBoolean()) { ignore += "," + randomFrom(RestClientTestUtil.getAllErrorStatusCodes()); } + // literal "ignore" rather than IGNORE_RESPONSE_CODES_PARAM since this is something on which callers might rely request.addParameter("ignore", ignore); } URI uri = uriBuilder.build(); diff --git a/distribution/build.gradle b/distribution/build.gradle index 90af1472deb2e..e45f1d09625d6 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -14,6 +14,7 @@ import org.elasticsearch.gradle.internal.ConcatFilesTask import org.elasticsearch.gradle.internal.DependenciesInfoPlugin import org.elasticsearch.gradle.internal.NoticeTask import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin import java.nio.file.Files import java.nio.file.Path @@ -30,6 +31,15 @@ configurations { attribute(Category.CATEGORY_ATTRIBUTE, project.getObjects().named(Category.class, Category.DOCUMENTATION)) } } + featuresMetadata { + attributes { + attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) + } + } +} + +dependencies { + featuresMetadata project(':server') } def thisProj = project @@ -196,6 +206,7 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { } distro.copyModule(processDefaultOutputsTaskProvider, module) + dependencies.add('featuresMetadata', module) if (module.name.startsWith('transport-') || (BuildParams.snapshotBuild == false && module.name == 'apm')) { distro.copyModule(processIntegTestOutputsTaskProvider, module) } @@ -214,6 +225,7 @@ xpack.subprojects.findAll { it.parent == xpack
}.each { Project xpackModule -> } } distro.copyModule(processDefaultOutputsTaskProvider, xpackModule) + dependencies.add('featuresMetadata', xpackModule) if (xpackModule.name.equals('core') || xpackModule.name.equals('security')) { distro.copyModule(processIntegTestOutputsTaskProvider, xpackModule) } diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index 39f9bbf536dda..0a47d0652e465 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -8,15 +8,17 @@ tasks.named(sourceSets.unsupportedJdkVersionEntrypoint.compileJavaTaskName).conf targetCompatibility = JavaVersion.VERSION_1_8 } + tasks.named("jar") { manifest { attributes("Multi-Release": "true") } + FileCollection mainOutput = sourceSets.main.output; from(sourceSets.unsupportedJdkVersionEntrypoint.output) eachFile { details -> if (details.path.equals("org/elasticsearch/tools/java_version_checker/JavaVersionChecker.class") && - sourceSets.main.output.asFileTree.contains(details.file)) { + mainOutput.asFileTree.contains(details.file)) { details.relativePath = details.relativePath.prepend("META-INF/versions/17") } } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 25b5883166ccc..e0d1dd983c0de 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -24,8 +24,8 @@ dependencies { implementation 'org.ow2.asm:asm:9.5' implementation 'org.ow2.asm:asm-tree:9.5' - api "org.bouncycastle:bcpg-fips:1.0.4" - api "org.bouncycastle:bc-fips:1.0.2" + api "org.bouncycastle:bcpg-fips:1.0.7.1" + api "org.bouncycastle:bc-fips:1.0.2.4" testImplementation project(":test:framework") testImplementation "com.google.jimfs:jimfs:${versions.jimfs}" testRuntimeOnly "com.google.guava:guava:${versions.jimfs_guava}" diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index e9adf9882b6db..c088e89338e74 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java @@ -88,7 +88,6 @@ import java.util.Arrays; import java.util.Date; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -1591,15 +1590,6 @@ public void testGetSemanticVersion() { assertThat(InstallPluginAction.getSemanticVersion("foo-1.2.3"), nullValue()); } - private Map<String, Map<String, String>> namedComponentsMap() { - Map<String, Map<String, String>> result = new LinkedHashMap<>(); - Map<String, String> extensibles = new LinkedHashMap<>(); - extensibles.put("a_component", "p.A"); - extensibles.put("b_component", "p.B"); - result.put("org.elasticsearch.plugins.cli.test_model.ExtensibleInterface", extensibles); - return result; - } - private static String namedComponentsJSON() { return """ { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java index 7d900155488b2..73e89fc948029 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java +++
b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/RemovePluginActionTests.java @@ -140,10 +140,6 @@ public void testRemoveMultiple() throws Exception { assertRemoveCleaned(env); } - private static Version minimumCompatibleVersion(Version v) { - return Version.fromString((v.major - 1) + ".0.0"); - } - public void testBin() throws Exception { createPlugin("fake"); Path binDir = env.binFile().resolve("fake"); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java index b6cd680cb5816..9dcd630f52631 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java @@ -145,7 +145,7 @@ static List<String> apmJvmOptions(Settings settings, @Nullable SecureSettings se // Configures a log file to write to. Don't disable writing to a log file, // as the agent will then require extra Security Manager permissions when // it tries to do something else, and it's just painful. - propertiesMap.put("log_file", logsDir.resolve("apm-agent.log").toString()); + propertiesMap.put("log_file", logsDir.resolve("apm-agent.json").toString()); // No point doing anything if we don't have a destination for the trace data, and it can't be configured dynamically if (propertiesMap.containsKey("server_url") == false && propertiesMap.containsKey("server_urls") == false) { diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java index 25c61c41638d1..ea2df72fb2c0b 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java @@ -107,6 +107,14 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce // we are running in the foreground, so wait for the server to exit int exitCode = server.waitFor(); + onExit(exitCode); + } + + /** + * A post-exit hook to perform additional processing before the command terminates + * @param exitCode the server process exit code + */ + protected void onExit(int exitCode) throws UserException { if (exitCode != ExitCodes.OK) { throw new UserException(exitCode, "Elasticsearch exited unexpectedly"); } diff --git a/docs/build.gradle b/docs/build.gradle index 33e6cc6080a95..da3d83378e894 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -72,6 +72,9 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { setting 'xpack.license.self_generated.type', 'trial' setting 'indices.lifecycle.history_index_enabled', 'false' keystorePassword 'keystore-password' + if (BuildParams.isSnapshotBuild() == false) { + requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0) + } } // debug ccr test failures: diff --git a/docs/changelog/100408.yaml b/docs/changelog/100408.yaml new file mode 100644 index 0000000000000..275c3b4a0de48 --- /dev/null +++ b/docs/changelog/100408.yaml @@ -0,0 +1,5 @@ +pr: 100408 +summary: "ESQL: Make blocks ref counted" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/100570.yaml b/docs/changelog/100570.yaml new file mode 100644 index 0000000000000..b68a905b0e046 --- /dev/null +++ b/docs/changelog/100570.yaml @@ -0,0 +1,5 @@ +pr: 100570 +summary:
Added metric for cache eviction of entries with non-zero frequency +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/100921.yaml b/docs/changelog/100921.yaml new file mode 100644 index 0000000000000..e6e2caa93d465 --- /dev/null +++ b/docs/changelog/100921.yaml @@ -0,0 +1,5 @@ +pr: 100921 +summary: "Add support for Serbian Language Analyzer" +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/100938.yaml b/docs/changelog/100938.yaml new file mode 100644 index 0000000000000..b21f6955c992e --- /dev/null +++ b/docs/changelog/100938.yaml @@ -0,0 +1,5 @@ +pr: 100938 +summary: "Set includeShardsStats = false in NodesStatsRequest where the caller does not use shard-level statistics" +area: Stats +type: enhancement +issues: [] diff --git a/docs/changelog/101333.yaml b/docs/changelog/101333.yaml new file mode 100644 index 0000000000000..4452687b995d3 --- /dev/null +++ b/docs/changelog/101333.yaml @@ -0,0 +1,29 @@ +pr: 101333 +summary: Fixed JWT principal from claims +area: Authorization +type: breaking +issues: [] +breaking: + title: Fixed JWT principal from claims + area: Authorization + details: "This changes the format of a JWT's principal before the JWT is actually\ \ validated by any JWT realm. The JWT's principal is a convenient way to refer\ \ to a JWT that has not yet been verified by a JWT realm. The JWT's principal\ \ is printed in the audit and regular logs (notably for auditing authn failures)\ \ as well as the smart realm chain reordering optimization. The JWT principal\ \ is NOT required to be identical to the JWT-authenticated user's principal, but\ \ in general, they should be similar. Previously, the JWT's principal was built\ \ by individual realms in the same way the realms built the authenticated user's\ \ principal. This had the advantage that, in simpler JWT realm configurations\ \ (e.g. a single JWT realm in the chain), the JWT principal and the authenticated\ \ user's principal are very similar. However, the drawback is that, in general,\ \ the JWT principal and the user principal can be very different (i.e. in the\ \ case where one JWT realm builds the JWT principal and a different one builds\ \ the user principal). Another downside is that the (unauthenticated) JWT principal\ \ depended on realm ordering, which makes identifying the JWT from its principal\ \ dependent on the ES authn realm configuration. This PR implements a consistent\ \ fixed logic to build the JWT principal, which depends only on the JWT's claims\ \ and not on ES configuration." + impact: "Users will observe changed format and values for the `user.name` attribute\ \ of `authentication_failed` audit log events, in the JWT (failed) authn case."
+ notable: false diff --git a/docs/changelog/101390.yaml b/docs/changelog/101390.yaml new file mode 100644 index 0000000000000..23bdef6e39dfe --- /dev/null +++ b/docs/changelog/101390.yaml @@ -0,0 +1,5 @@ +pr: 101390 +summary: Enable inter-segment concurrency for terms aggs +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/101409.yaml b/docs/changelog/101409.yaml new file mode 100644 index 0000000000000..82e7f339fdd89 --- /dev/null +++ b/docs/changelog/101409.yaml @@ -0,0 +1,5 @@ +pr: 101409 +summary: Add a simulate ingest API +area: Ingest Node +type: feature +issues: [] diff --git a/docs/changelog/101423.yaml b/docs/changelog/101423.yaml new file mode 100644 index 0000000000000..a5497d444797f --- /dev/null +++ b/docs/changelog/101423.yaml @@ -0,0 +1,5 @@ +pr: 101423 +summary: Export circuit breaker trip count as a counter metric +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/101476.yaml b/docs/changelog/101476.yaml deleted file mode 100644 index ee4cd9b1e4b1a..0000000000000 --- a/docs/changelog/101476.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101476 -summary: Mark legacy stack templates as deprecated -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/101577.yaml b/docs/changelog/101577.yaml new file mode 100644 index 0000000000000..e485fd3811cb6 --- /dev/null +++ b/docs/changelog/101577.yaml @@ -0,0 +1,5 @@ +pr: 101577 +summary: Add metrics to the shared blob cache +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/101609.yaml b/docs/changelog/101609.yaml new file mode 100644 index 0000000000000..27993574743d2 --- /dev/null +++ b/docs/changelog/101609.yaml @@ -0,0 +1,9 @@ +pr: 101609 +summary: > + Add a node feature join barrier. This prevents a node from joining a cluster unless it supports + all the features already present in that cluster. This ensures that once a feature is supported + by all the nodes in a cluster, that feature will always remain supported in the future.
+ This is the counterpart of the version join barrier, but for features +area: "Cluster Coordination" +type: feature +issues: [] diff --git a/docs/changelog/101682.yaml b/docs/changelog/101682.yaml new file mode 100644 index 0000000000000..e512006057581 --- /dev/null +++ b/docs/changelog/101682.yaml @@ -0,0 +1,5 @@ +pr: 101682 +summary: "Add manage_enrich cluster privilege to kibana_system role" +area: Authentication +type: enhancement +issues: [] diff --git a/docs/changelog/101705.yaml b/docs/changelog/101705.yaml deleted file mode 100644 index baa7e69d48d88..0000000000000 --- a/docs/changelog/101705.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101705 -summary: Respect regional AWS STS endpoints -area: Snapshot/Restore -type: bug -issues: - - 89175 diff --git a/docs/changelog/101723.yaml b/docs/changelog/101723.yaml new file mode 100644 index 0000000000000..146d164805f00 --- /dev/null +++ b/docs/changelog/101723.yaml @@ -0,0 +1,6 @@ +pr: 101723 +summary: Allow non-dynamic index settings to be updated by automatically unassigning + shards +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/changelog/101799.yaml b/docs/changelog/101799.yaml deleted file mode 100644 index a3ef5fb190177..0000000000000 --- a/docs/changelog/101799.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101799 -summary: Fix memory leak from JWT cache (and fix the usage of the JWT auth cache) -area: Authentication -type: bug -issues: [] diff --git a/docs/changelog/101826.yaml b/docs/changelog/101826.yaml new file mode 100644 index 0000000000000..87f3f8df1b0c2 --- /dev/null +++ b/docs/changelog/101826.yaml @@ -0,0 +1,6 @@ +pr: 101826 +summary: Support keyed histograms +area: Aggregations +type: enhancement +issues: + - 100242 diff --git a/docs/changelog/101845.yaml b/docs/changelog/101845.yaml new file mode 100644 index 0000000000000..0dd95bdabca57 --- /dev/null +++ b/docs/changelog/101845.yaml @@ -0,0 +1,5 @@ +pr: 101845 +summary: Introduce new endpoint to expose data stream lifecycle stats +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/101859.yaml b/docs/changelog/101859.yaml new file mode 100644 index 0000000000000..54f3fb12810ca --- /dev/null +++ b/docs/changelog/101859.yaml @@ -0,0 +1,6 @@ +pr: 101859 +summary: Cover head/tail command edge cases and data types +area: EQL +type: bug +issues: + - 101724 diff --git a/docs/changelog/101904.yaml b/docs/changelog/101904.yaml new file mode 100644 index 0000000000000..cad422cc52e15 --- /dev/null +++ b/docs/changelog/101904.yaml @@ -0,0 +1,5 @@ +pr: 101904 +summary: Allow granting API keys with JWT as the access_token +area: Security +type: feature +issues: [] diff --git a/docs/changelog/101915.yaml b/docs/changelog/101915.yaml new file mode 100644 index 0000000000000..aed7ca62021a5 --- /dev/null +++ b/docs/changelog/101915.yaml @@ -0,0 +1,5 @@ +pr: 101915 +summary: Add inference counts by model to the machine learning usage stats +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/101979.yaml b/docs/changelog/101979.yaml new file mode 100644 index 0000000000000..ad119df24d36f --- /dev/null +++ b/docs/changelog/101979.yaml @@ -0,0 +1,5 @@ +pr: 101979 +summary: Calculate CO2 emissions and costs +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/101989.yaml b/docs/changelog/101989.yaml new file mode 100644 index 0000000000000..d294d194bd4e8 --- /dev/null +++ b/docs/changelog/101989.yaml @@ -0,0 +1,5 @@ +pr: 101989 +summary: Add message field to
`HealthPeriodicLogger` and `S3RequestRetryStats` +area: Health +type: enhancement +issues: [] diff --git a/docs/changelog/102020.yaml b/docs/changelog/102020.yaml new file mode 100644 index 0000000000000..7c74e9676d342 --- /dev/null +++ b/docs/changelog/102020.yaml @@ -0,0 +1,5 @@ +pr: 102020 +summary: Retrieve stacktrace events from a custom index +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/102048.yaml b/docs/changelog/102048.yaml new file mode 100644 index 0000000000000..54bc1d9eae52e --- /dev/null +++ b/docs/changelog/102048.yaml @@ -0,0 +1,5 @@ +pr: 102048 +summary: "Repo analysis: verify empty register" +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/102051.yaml b/docs/changelog/102051.yaml new file mode 100644 index 0000000000000..c3ca4a546928f --- /dev/null +++ b/docs/changelog/102051.yaml @@ -0,0 +1,5 @@ +pr: 102051 +summary: "Repo analysis: allow configuration of register ops" +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/102056.yaml b/docs/changelog/102056.yaml new file mode 100644 index 0000000000000..455f66ba90b03 --- /dev/null +++ b/docs/changelog/102056.yaml @@ -0,0 +1,5 @@ +pr: 102056 +summary: Use `BulkRequest` to store Application Privileges +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/102057.yaml b/docs/changelog/102057.yaml new file mode 100644 index 0000000000000..d5b664ba14c29 --- /dev/null +++ b/docs/changelog/102057.yaml @@ -0,0 +1,6 @@ +pr: 102057 +summary: Simplify `BlobStoreRepository` idle check +area: Snapshot/Restore +type: bug +issues: + - 101948 diff --git a/docs/changelog/102065.yaml b/docs/changelog/102065.yaml new file mode 100644 index 0000000000000..1a9a219df4502 --- /dev/null +++ b/docs/changelog/102065.yaml @@ -0,0 +1,5 @@ +pr: 102065 +summary: Add more desired balance stats +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/102075.yaml b/docs/changelog/102075.yaml new file mode 100644 index 0000000000000..54daae04169db --- /dev/null +++ b/docs/changelog/102075.yaml @@ -0,0 +1,5 @@ +pr: 102075 +summary: Accept a single or multiple inputs to `_inference` +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102089.yaml b/docs/changelog/102089.yaml new file mode 100644 index 0000000000000..9f33c0648d09f --- /dev/null +++ b/docs/changelog/102089.yaml @@ -0,0 +1,5 @@ +pr: 102089 +summary: Add prefix strings option to trained models +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102114.yaml b/docs/changelog/102114.yaml new file mode 100644 index 0000000000000..a08389da0351b --- /dev/null +++ b/docs/changelog/102114.yaml @@ -0,0 +1,6 @@ +pr: 102114 +summary: Fix double-completion in `SecurityUsageTransportAction` +area: Security +type: bug +issues: + - 102111 diff --git a/docs/changelog/102138.yaml b/docs/changelog/102138.yaml new file mode 100644 index 0000000000000..3819e3201150e --- /dev/null +++ b/docs/changelog/102138.yaml @@ -0,0 +1,5 @@ +pr: 102138 +summary: Skip shards that don't match the source query during checkpointing +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/102140.yaml b/docs/changelog/102140.yaml new file mode 100644 index 0000000000000..0f086649b9710 --- /dev/null +++ b/docs/changelog/102140.yaml @@ -0,0 +1,6 @@ +pr: 102140 +summary: Collect data tiers usage stats more efficiently +area: ILM+SLM +type: bug +issues: + - 100230 \ No newline at end of file diff --git 
a/docs/changelog/102151.yaml b/docs/changelog/102151.yaml new file mode 100644 index 0000000000000..652ae555af97d --- /dev/null +++ b/docs/changelog/102151.yaml @@ -0,0 +1,5 @@ +pr: 102151 +summary: Default `run_ml_inference` should be true +area: Application +type: bug +issues: [] diff --git a/docs/changelog/102165.yaml b/docs/changelog/102165.yaml new file mode 100644 index 0000000000000..e1c4c76f1f6ff --- /dev/null +++ b/docs/changelog/102165.yaml @@ -0,0 +1,6 @@ +pr: 102165 +summary: Fix planning of duplicate aggs +area: ES|QL +type: bug +issues: + - 102083 diff --git a/docs/changelog/102172.yaml b/docs/changelog/102172.yaml new file mode 100644 index 0000000000000..485c2c4327e11 --- /dev/null +++ b/docs/changelog/102172.yaml @@ -0,0 +1,5 @@ +pr: 102172 +summary: Adjust Histogram's bucket accounting to be iterative +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/102183.yaml b/docs/changelog/102183.yaml new file mode 100644 index 0000000000000..3daa1418ba5d0 --- /dev/null +++ b/docs/changelog/102183.yaml @@ -0,0 +1,13 @@ +pr: 102183 +summary: "[ES|QL] pow function always returns double" +area: ES|QL +type: "breaking" +issues: + - 99055 +breaking: + title: "[ES|QL] pow function always returns double" + area: REST API + details: "In ES|QL, the pow function no longer returns the type of its inputs, instead\ \ always returning a double." + impact: Low. Most queries should continue to function with the change. + notable: false diff --git a/docs/changelog/102184.yaml b/docs/changelog/102184.yaml new file mode 100644 index 0000000000000..ba4d045b6b0aa --- /dev/null +++ b/docs/changelog/102184.yaml @@ -0,0 +1,5 @@ +pr: 102184 +summary: Track ESQL enrich memory +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102188.yaml b/docs/changelog/102188.yaml new file mode 100644 index 0000000000000..595a8395fab5c --- /dev/null +++ b/docs/changelog/102188.yaml @@ -0,0 +1,5 @@ +pr: 102188 +summary: Track blocks in `AsyncOperator` +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102190.yaml b/docs/changelog/102190.yaml new file mode 100644 index 0000000000000..cd04e041fca5e --- /dev/null +++ b/docs/changelog/102190.yaml @@ -0,0 +1,5 @@ +pr: 102190 +summary: Track pages in ESQL enrich request/response +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102192.yaml b/docs/changelog/102192.yaml new file mode 100644 index 0000000000000..531aa943c9e36 --- /dev/null +++ b/docs/changelog/102192.yaml @@ -0,0 +1,5 @@ +pr: 102192 +summary: "ESQL: Load more than one field at once" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102193.yaml b/docs/changelog/102193.yaml new file mode 100644 index 0000000000000..4d64493602ff2 --- /dev/null +++ b/docs/changelog/102193.yaml @@ -0,0 +1,5 @@ +pr: 102193 +summary: Fix cache invalidation on privilege modification +area: Authorization +type: bug +issues: [] diff --git a/docs/changelog/102208.yaml b/docs/changelog/102208.yaml new file mode 100644 index 0000000000000..b566a85753d82 --- /dev/null +++ b/docs/changelog/102208.yaml @@ -0,0 +1,5 @@ +pr: 102208 +summary: Add static node settings to set default values for max merged segment sizes +area: Engine +type: enhancement +issues: [] diff --git a/docs/changelog/102220.yaml b/docs/changelog/102220.yaml new file mode 100644 index 0000000000000..d24dab1f91b31 --- /dev/null +++ b/docs/changelog/102220.yaml @@ -0,0 +1,5 @@ +pr: 102220 +summary: Upgrade xmlsec to 2.3.4 +area: Security +type: enhancement +issues: [] diff
--git a/docs/changelog/102230.yaml b/docs/changelog/102230.yaml new file mode 100644 index 0000000000000..20e8d8d1f10a6 --- /dev/null +++ b/docs/changelog/102230.yaml @@ -0,0 +1,6 @@ +pr: 102230 +summary: Set region for the STS client via privileged calls in AWS SDK +area: Snapshot/Restore +type: bug +issues: + - 102173 diff --git a/docs/changelog/102240.yaml b/docs/changelog/102240.yaml new file mode 100644 index 0000000000000..5df0046ee92fc --- /dev/null +++ b/docs/changelog/102240.yaml @@ -0,0 +1,5 @@ +pr: 102240 +summary: Exclude stack traces from transform audit messages and health +area: Transform +type: bug +issues: [] diff --git a/docs/changelog/102244.yaml b/docs/changelog/102244.yaml new file mode 100644 index 0000000000000..3b160e033b57e --- /dev/null +++ b/docs/changelog/102244.yaml @@ -0,0 +1,5 @@ +pr: 102244 +summary: Expose reconciliation metrics via APM +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/102245.yaml b/docs/changelog/102245.yaml new file mode 100644 index 0000000000000..387540d96290c --- /dev/null +++ b/docs/changelog/102245.yaml @@ -0,0 +1,5 @@ +pr: 102245 +summary: Add non-green indicator names to `HealthPeriodicLogger` message +area: Health +type: enhancement +issues: [] diff --git a/docs/changelog/102250.yaml b/docs/changelog/102250.yaml new file mode 100644 index 0000000000000..755341d9a3a64 --- /dev/null +++ b/docs/changelog/102250.yaml @@ -0,0 +1,6 @@ +pr: 102250 +summary: "[ILM] Fix downsample to skip already downsampled indices" +area: ILM+SLM +type: bug +issues: + - 102249 diff --git a/docs/changelog/102259.yaml b/docs/changelog/102259.yaml new file mode 100644 index 0000000000000..3d8a1c6381f6d --- /dev/null +++ b/docs/changelog/102259.yaml @@ -0,0 +1,5 @@ +pr: 102259 +summary: "[Usage API] Count all the data streams that have lifecycle" +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/102273.yaml b/docs/changelog/102273.yaml new file mode 100644 index 0000000000000..78ecc8b2d2734 --- /dev/null +++ b/docs/changelog/102273.yaml @@ -0,0 +1,5 @@ +pr: 102273 +summary: Improve analyzer reload log message +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/102281.yaml b/docs/changelog/102281.yaml new file mode 100644 index 0000000000000..ac6c17591e013 --- /dev/null +++ b/docs/changelog/102281.yaml @@ -0,0 +1,5 @@ +pr: 102281 +summary: Improve failure handling in `ContinuousComputation` +area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/102282.yaml b/docs/changelog/102282.yaml new file mode 100644 index 0000000000000..4860d70f99ccc --- /dev/null +++ b/docs/changelog/102282.yaml @@ -0,0 +1,6 @@ +pr: 102282 +summary: "ES|QL: Fix drop of renamed grouping" +area: ES|QL +type: bug +issues: + - 102121 diff --git a/docs/changelog/102292.yaml b/docs/changelog/102292.yaml new file mode 100644 index 0000000000000..953c3ffdf6150 --- /dev/null +++ b/docs/changelog/102292.yaml @@ -0,0 +1,5 @@ +pr: 102292 +summary: Consider duplicate stacktraces in custom index +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/102311.yaml b/docs/changelog/102311.yaml new file mode 100644 index 0000000000000..bb1769527fdd4 --- /dev/null +++ b/docs/changelog/102311.yaml @@ -0,0 +1,5 @@ +pr: 102311 +summary: Upgrade reactor netty http version +area: Snapshot/Restore +type: upgrade +issues: [] diff --git a/docs/changelog/102317.yaml b/docs/changelog/102317.yaml new file mode 100644 index 0000000000000..89b2ae5432101 --- /dev/null +++ b/docs/changelog/102317.yaml @@ -0,0 +1,6 
@@ +pr: 102317 +summary: "ESQL: Fix single value query" +area: ES|QL +type: bug +issues: + - 102298 diff --git a/docs/changelog/102350.yaml b/docs/changelog/102350.yaml new file mode 100644 index 0000000000000..00a311c5d99f8 --- /dev/null +++ b/docs/changelog/102350.yaml @@ -0,0 +1,6 @@ +pr: 102350 +summary: "ESQL: Fix rare bug with empty string" +area: ES|QL +type: bug +issues: + - 101969 diff --git a/docs/changelog/102379.yaml b/docs/changelog/102379.yaml new file mode 100644 index 0000000000000..0773b137779a5 --- /dev/null +++ b/docs/changelog/102379.yaml @@ -0,0 +1,6 @@ +pr: 102379 +summary: Pass source query to `_field_caps` (as `index_filter`) when deducing destination index mappings for better + performance +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/102388.yaml b/docs/changelog/102388.yaml new file mode 100644 index 0000000000000..3e65e46949bda --- /dev/null +++ b/docs/changelog/102388.yaml @@ -0,0 +1,6 @@ +pr: 102388 +summary: Add support for `index_filter` to open pit +area: Search +type: enhancement +issues: + - 99740 diff --git a/docs/changelog/102391.yaml b/docs/changelog/102391.yaml new file mode 100644 index 0000000000000..5fcbb9e6d2858 --- /dev/null +++ b/docs/changelog/102391.yaml @@ -0,0 +1,5 @@ +pr: 102391 +summary: "ESQL: Support the `_source` metadata field" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102396.yaml b/docs/changelog/102396.yaml new file mode 100644 index 0000000000000..9ea53ca5b6840 --- /dev/null +++ b/docs/changelog/102396.yaml @@ -0,0 +1,5 @@ +pr: 102396 +summary: Add more logging to the real memory circuit breaker and lower minimum interval +area: "Infra/Circuit Breakers" +type: bug +issues: [] diff --git a/docs/changelog/102399.yaml b/docs/changelog/102399.yaml new file mode 100644 index 0000000000000..7a4e1ff7ddab6 --- /dev/null +++ b/docs/changelog/102399.yaml @@ -0,0 +1,6 @@ +pr: 102399 +summary: "ES|QL: Fix layout management for Project" +area: ES|QL +type: bug +issues: + - 102120 diff --git a/docs/changelog/102434.yaml b/docs/changelog/102434.yaml new file mode 100644 index 0000000000000..ab6aa886c13b1 --- /dev/null +++ b/docs/changelog/102434.yaml @@ -0,0 +1,5 @@ +pr: 102434 +summary: "ESQL: Short circuit loading empty doc values" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102447.yaml b/docs/changelog/102447.yaml new file mode 100644 index 0000000000000..76823153670bd --- /dev/null +++ b/docs/changelog/102447.yaml @@ -0,0 +1,6 @@ +pr: 102447 +summary: Pass transform source query as `index_filter` to `open_point_in_time` request +area: Transform +type: enhancement +issues: + - 101049 diff --git a/docs/changelog/102456.yaml b/docs/changelog/102456.yaml new file mode 100644 index 0000000000000..6ef3b8f16f53c --- /dev/null +++ b/docs/changelog/102456.yaml @@ -0,0 +1,6 @@ +pr: 102456 +summary: Switch logs data streams to search all fields by default +area: Data streams +type: enhancement +issues: + - 99872 diff --git a/docs/changelog/102461.yaml b/docs/changelog/102461.yaml new file mode 100644 index 0000000000000..c0c07554ed21f --- /dev/null +++ b/docs/changelog/102461.yaml @@ -0,0 +1,5 @@ +pr: 102461 +summary: Enable concurrency for scripted metric agg +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102462.yaml b/docs/changelog/102462.yaml new file mode 100644 index 0000000000000..d44ccc4cbbc5c --- /dev/null +++ b/docs/changelog/102462.yaml @@ -0,0 +1,5 @@ +pr: 102462 +summary: Check the real memory circuit breaker when building 
global ordinals +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102467.yaml b/docs/changelog/102467.yaml new file mode 100644 index 0000000000000..580788a5aa2f9 --- /dev/null +++ b/docs/changelog/102467.yaml @@ -0,0 +1,6 @@ +pr: 102467 +summary: Fix dense_vector cluster stats indexed_vector_dim_min/max values +area: Mapping +type: bug +issues: + - 102416 diff --git a/docs/changelog/102472.yaml b/docs/changelog/102472.yaml new file mode 100644 index 0000000000000..b0f5bfc714643 --- /dev/null +++ b/docs/changelog/102472.yaml @@ -0,0 +1,5 @@ +pr: 102472 +summary: Expose the `invalidation` field in Get/Query `ApiKey` APIs +area: Security +type: enhancement +issues: [ ] diff --git a/docs/changelog/102476.yaml b/docs/changelog/102476.yaml new file mode 100644 index 0000000000000..a53a20ecfec20 --- /dev/null +++ b/docs/changelog/102476.yaml @@ -0,0 +1,5 @@ +pr: 102476 +summary: Unwrap `ExecutionException` when loading from cache in `AbstractIndexOrdinalsFieldData` +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/102490.yaml b/docs/changelog/102490.yaml new file mode 100644 index 0000000000000..8ff554ab0f0fe --- /dev/null +++ b/docs/changelog/102490.yaml @@ -0,0 +1,6 @@ +pr: 102490 +summary: "ESQL: Load text field from parent keyword field" +area: ES|QL +type: enhancement +issues: + - 102473 diff --git a/docs/changelog/102492.yaml b/docs/changelog/102492.yaml new file mode 100644 index 0000000000000..943d82873e0b6 --- /dev/null +++ b/docs/changelog/102492.yaml @@ -0,0 +1,5 @@ +pr: 102492 +summary: Ensure datafeed previews with no start or end time don't search the cold or frozen tiers +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/102495.yaml b/docs/changelog/102495.yaml new file mode 100644 index 0000000000000..77ae42f7eebcb --- /dev/null +++ b/docs/changelog/102495.yaml @@ -0,0 +1,6 @@ +pr: 102495 +summary: "Add support for configuring proxy scheme in S3 client settings and EC2 discovery plugin" +area: Distributed +type: enhancement +issues: + - 101873 diff --git a/docs/changelog/102510.yaml b/docs/changelog/102510.yaml new file mode 100644 index 0000000000000..2b654b5c85929 --- /dev/null +++ b/docs/changelog/102510.yaml @@ -0,0 +1,7 @@ +pr: 102510 +summary: "ESQL: Make fieldcaps calls lighter" +area: ES|QL +type: enhancement +issues: + - 101763 + - 102393 diff --git a/docs/changelog/102511.yaml b/docs/changelog/102511.yaml new file mode 100644 index 0000000000000..cf80ca03e197f --- /dev/null +++ b/docs/changelog/102511.yaml @@ -0,0 +1,5 @@ +pr: 102511 +summary: Trigger parent circuit breaker when building scorers in filters aggregation +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/102512.yaml b/docs/changelog/102512.yaml new file mode 100644 index 0000000000000..d4bc765ecaf5f --- /dev/null +++ b/docs/changelog/102512.yaml @@ -0,0 +1,6 @@ +pr: 102512 +summary: Implement exponential backoff for transform state persistence retrying +area: Transform +type: enhancement +issues: + - 102528 diff --git a/docs/changelog/102562.yaml b/docs/changelog/102562.yaml new file mode 100644 index 0000000000000..a4b4f5a095118 --- /dev/null +++ b/docs/changelog/102562.yaml @@ -0,0 +1,5 @@ +pr: 102562 +summary: Track blocks of intermediate state of aggs +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102570.yaml b/docs/changelog/102570.yaml new file mode 100644 index 0000000000000..2d3f878dbbb27 --- /dev/null +++ b/docs/changelog/102570.yaml @@ -0,0 +1,5 @@ +pr: 102570 +summary: Added 
`beat.stats.libbeat.pipeline.queue.max_events` +area: Monitoring +type: enhancement +issues: [] diff --git a/docs/changelog/102571.yaml b/docs/changelog/102571.yaml new file mode 100644 index 0000000000000..25272408161db --- /dev/null +++ b/docs/changelog/102571.yaml @@ -0,0 +1,5 @@ +pr: 102571 +summary: Allow executing multiple periodic flushes while they are being made durable +area: Store +type: enhancement +issues: [] diff --git a/docs/changelog/102580.yaml b/docs/changelog/102580.yaml new file mode 100644 index 0000000000000..50d315efd7071 --- /dev/null +++ b/docs/changelog/102580.yaml @@ -0,0 +1,6 @@ +pr: 102580 +summary: Fix DISSECT with empty patterns +area: ES|QL +type: bug +issues: + - 102577 diff --git a/docs/changelog/102598.yaml b/docs/changelog/102598.yaml new file mode 100644 index 0000000000000..c32519acdf6d1 --- /dev/null +++ b/docs/changelog/102598.yaml @@ -0,0 +1,5 @@ +pr: 102598 +summary: Add apm api for asynchronous counters (always increasing) +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/102599.yaml b/docs/changelog/102599.yaml new file mode 100644 index 0000000000000..74e3d89421463 --- /dev/null +++ b/docs/changelog/102599.yaml @@ -0,0 +1,5 @@ +pr: 102599 +summary: "Recreate the Elasticsearch private temporary directory if it doesn't exist when an ML job is opened" +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/102602.yaml b/docs/changelog/102602.yaml new file mode 100644 index 0000000000000..dd01eaa98b214 --- /dev/null +++ b/docs/changelog/102602.yaml @@ -0,0 +1,5 @@ +pr: 102602 +summary: Consider search context missing exceptions as recoverable +area: Transform +type: bug +issues: [] diff --git a/docs/changelog/102612.yaml b/docs/changelog/102612.yaml new file mode 100644 index 0000000000000..60808ae72801a --- /dev/null +++ b/docs/changelog/102612.yaml @@ -0,0 +1,5 @@ +pr: 102612 +summary: Track blocks when hashing single multi-valued field +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102636.yaml b/docs/changelog/102636.yaml new file mode 100644 index 0000000000000..8b32e0568b0fb --- /dev/null +++ b/docs/changelog/102636.yaml @@ -0,0 +1,5 @@ +pr: 102636 +summary: Revert non-semantic `NodeInfo` +area: Infra/Core +type: regression +issues: [] diff --git a/docs/changelog/102637.yaml b/docs/changelog/102637.yaml new file mode 100644 index 0000000000000..4d5d689934bd6 --- /dev/null +++ b/docs/changelog/102637.yaml @@ -0,0 +1,5 @@ +pr: 102637 +summary: Improve stability of spike and dip detection for the change point aggregation +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102644.yaml b/docs/changelog/102644.yaml new file mode 100644 index 0000000000000..17c5cbebed7cc --- /dev/null +++ b/docs/changelog/102644.yaml @@ -0,0 +1,5 @@ +pr: 102644 +summary: Disable parallelism for composite agg against high cardinality fields +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102673.yaml b/docs/changelog/102673.yaml new file mode 100644 index 0000000000000..16546edb3cf3c --- /dev/null +++ b/docs/changelog/102673.yaml @@ -0,0 +1,5 @@ +pr: 102673 +summary: "ESQL: Share constant null Blocks" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/102680.yaml b/docs/changelog/102680.yaml new file mode 100644 index 0000000000000..8b32c5029ea2a --- /dev/null +++ b/docs/changelog/102680.yaml @@ -0,0 +1,5 @@ +pr: 102680 +summary: Make `api_key.delete.interval` a dynamic setting +area: Security +type: enhancement 
+issues: [] diff --git a/docs/changelog/102682.yaml b/docs/changelog/102682.yaml new file mode 100644 index 0000000000000..190ff3df5a7f6 --- /dev/null +++ b/docs/changelog/102682.yaml @@ -0,0 +1,5 @@ +pr: 102682 +summary: Introduce fielddata cache TTL +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102710.yaml b/docs/changelog/102710.yaml new file mode 100644 index 0000000000000..ee805c70180a0 --- /dev/null +++ b/docs/changelog/102710.yaml @@ -0,0 +1,5 @@ +pr: 102710 +summary: Enable concurrency for multi terms agg +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/102715.yaml b/docs/changelog/102715.yaml new file mode 100644 index 0000000000000..7311db66ce151 --- /dev/null +++ b/docs/changelog/102715.yaml @@ -0,0 +1,6 @@ +pr: 102715 +summary: Fix leaking blocks in TopN +area: ES|QL +type: bug +issues: + - 102646 diff --git a/docs/changelog/102716.yaml b/docs/changelog/102716.yaml new file mode 100644 index 0000000000000..39317fdb38415 --- /dev/null +++ b/docs/changelog/102716.yaml @@ -0,0 +1,5 @@ +pr: 102716 +summary: Fix leaking blocks in `BlockUtils` +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/102735.yaml b/docs/changelog/102735.yaml new file mode 100644 index 0000000000000..4726e08d1f314 --- /dev/null +++ b/docs/changelog/102735.yaml @@ -0,0 +1,5 @@ +pr: 102735 +summary: "[Profiling] Report in status API if docs exist" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/99134.yaml b/docs/changelog/99134.yaml new file mode 100644 index 0000000000000..10156b9b30066 --- /dev/null +++ b/docs/changelog/99134.yaml @@ -0,0 +1,5 @@ +pr: 99134 +summary: Add ability to create a data stream failure store +area: Data streams +type: feature +issues: [] diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 3947ed71ea9ae..44acba4752aaa 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -97,6 +97,11 @@ The available settings for the EC2 discovery plugin are as follows. this setting determines the port to use to connect to the proxy. Defaults to `80`. +`discovery.ec2.proxy.scheme`:: + + The scheme to use when connecting to the EC2 service endpoint through the proxy specified + in `discovery.ec2.proxy.host`. Valid values are `http` or `https`. Defaults to `http`. + `discovery.ec2.proxy.username` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index 2bedcd4698b42..d7d837b2f8364 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -127,8 +127,7 @@ init_script:: Executed prior to any collection of documents. Allows the ag + In the above example, the `init_script` creates an array `transactions` in the `state` object. -map_script:: Executed once per document collected. This is a required script. If no combine_script is specified, the resulting state - needs to be stored in the `state` object. +map_script:: Executed once per document collected. This is a required script. + In the above example, the `map_script` checks the value of the type field.
If the value is 'sale' the value of the amount field is added to the transactions array. If the value of the type field is not 'sale' the negated value of the amount field is added diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc index b5f1315531916..44a00b9f5b99e 100644 --- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc @@ -68,7 +68,7 @@ POST /_search -------------------------------------------------- // TEST[setup:sales] -<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals +<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-month intervals <2> A `sum` metric is used to calculate the sum of a field. This could be any numeric metric (sum, min, max, etc) <3> Finally, we specify a `moving_fn` aggregation which uses "the_sum" metric as its input. diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 45cb725492f07..5273537389e3d 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -36,6 +36,7 @@ following types are supported: <>, <>, <>, +<>, <>, <>, <>, @@ -64,8 +65,8 @@ The following analyzers support setting custom `stem_exclusion` list: `arabic`, `armenian`, `basque`, `bengali`, `bulgarian`, `catalan`, `czech`, `dutch`, `english`, `finnish`, `french`, `galician`, `german`, `hindi`, `hungarian`, `indonesian`, `irish`, `italian`, `latvian`, -`lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`, -`spanish`, `swedish`, `turkish`. +`lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `serbian`, +`sorani`, `spanish`, `swedish`, `turkish`. ==== Reimplementing language analyzers @@ -1588,6 +1589,55 @@ PUT /russian_example <2> This filter should be removed unless there are words which should be excluded from stemming. +[[serbian-analyzer]] +===== `serbian` analyzer + +The `serbian` analyzer could be reimplemented as a `custom` analyzer as follows: + +[source,console] +---------------------------------------------------- +PUT /serbian_example +{ + "settings": { + "analysis": { + "filter": { + "serbian_stop": { + "type": "stop", + "stopwords": "_serbian_" <1> + }, + "serbian_keywords": { + "type": "keyword_marker", + "keywords": ["пример"] <2> + }, + "serbian_stemmer": { + "type": "stemmer", + "language": "serbian" + } + }, + "analyzer": { + "rebuilt_serbian": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "serbian_stop", + "serbian_keywords", + "serbian_stemmer", + "serbian_normalization" + ] + } + } + } + } +} +---------------------------------------------------- +// TEST[s/"serbian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: serbian_example, first: serbian, second: rebuilt_serbian}\nendyaml\n/] + +<1> The default stopwords can be overridden with the `stopwords` +or `stopwords_path` parameters. +<2> This filter should be removed unless there are words which should +be excluded from stemming. 
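To spot-check the rebuilt analyzer, the `_analyze` API can be run against the `serbian_example` index created above. Below is a hedged sketch using the low-level Java REST client that appears earlier in this diff; the host, port, and sample text are placeholders, and the client-only `ignore` parameter shown is the one discussed in the `RestClient` change above:

[source,java]
----
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class AnalyzeSerbianExample {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint; point this at a real cluster.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("GET", "/serbian_example/_analyze");
            // The client-only "ignore" parameter (see the RestClient change above)
            // suppresses the ResponseException if the index does not exist yet.
            request.addParameter("ignore", "404");
            request.setJsonEntity("{ \"analyzer\": \"rebuilt_serbian\", \"text\": \"пример\" }");
            Response response = client.performRequest(request);
            // Print the token stream produced by the rebuilt analyzer.
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
----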
+ [[sorani-analyzer]] ===== `sorani` analyzer diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index a76bc6f6c5254..57e402988cc5a 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -9,7 +9,7 @@ A filter that stems words using a Snowball-generated stemmer. The values: `Arabic`, `Armenian`, `Basque`, `Catalan`, `Danish`, `Dutch`, `English`, `Estonian`, `Finnish`, `French`, `German`, `German2`, `Hungarian`, `Italian`, `Irish`, `Kp`, `Lithuanian`, `Lovins`, `Norwegian`, `Porter`, `Portuguese`, `Romanian`, -`Russian`, `Spanish`, `Swedish`, `Turkish`. +`Russian`, `Serbian`, `Spanish`, `Swedish`, `Turkish`. For example: diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index 162164e12872d..b8d883b057823 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -230,6 +230,9 @@ Russian:: https://snowballstem.org/algorithms/russian/stemmer.html[*`russian`*], https://doc.rero.ch/lm.php?url=1000%2C43%2C4%2C20091209094227-CA%2FDolamic_Ljiljana_-_Indexing_and_Searching_Strategies_for_the_Russian_20091209.pdf[`light_russian`] +Serbian:: +https://snowballstem.org/algorithms/serbian/stemmer.html[*`serbian`*] + Spanish:: https://www.ercim.eu/publication/ws-proceedings/CLEF2/savoy.pdf[*`light_spanish`*], https://snowballstem.org/algorithms/spanish/stemmer.html[`spanish`] diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc index 12e0d76f9901b..abba633b643dc 100644 --- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc @@ -356,6 +356,10 @@ parameter and a link to their predefined stop words in Lucene. `_russian_`:: {lucene-stop-word-link}/snowball/russian_stop.txt[Russian stop words] +[[serbian-stop-words]] +`_serbian_`:: +{lucene-stop-word-link}/sr/stopwords.txt[Serbian stop words] + [[sorani-stop-words]] `_sorani_`:: {lucene-stop-word-link}/ckb/stopwords.txt[Sorani stop words] diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 3922ef018a713..86d72cf52c9e9 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -601,7 +601,7 @@ stream's oldest backing index. // TESTRESPONSE[s/"index_uuid": "_eEfRrFHS9OyhqWntkgHAQ"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.07-000001"/"index_name": $body.data_streams.0.indices.0.index_name/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.08-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> First item in the `indices` array for `my-data-stream`. This item contains information about the stream's oldest backing index, @@ -704,4 +704,4 @@ Use the <> to update an existing data stream's aliases. 
Changing an existing data stream's aliases in its index pattern has no effect. -include::../alias.asciidoc[tag=alias-multiple-actions-example] \ No newline at end of file +include::../alias.asciidoc[tag=alias-multiple-actions-example] diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc index d3580ca4448a7..3c2e703d264ff 100644 --- a/docs/reference/data-streams/data-stream-apis.asciidoc +++ b/docs/reference/data-streams/data-stream-apis.asciidoc @@ -25,6 +25,8 @@ preview:[] preview:[] * <> preview:[] +* <> +preview:[] The following API is available for <>: @@ -55,4 +57,6 @@ include::{es-repo-dir}/data-streams/lifecycle/apis/delete-lifecycle.asciidoc[] include::{es-repo-dir}/data-streams/lifecycle/apis/explain-lifecycle.asciidoc[] +include::{es-repo-dir}/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc[] + include::{es-repo-dir}/indices/downsample-data-stream.asciidoc[] diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index b7d46b6301884..5bdfaf428d169 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -17,7 +17,7 @@ DELETE _ingest/pipeline/my-timestamp-pipeline The recommended way to downsample a time series data stream (TSDS) is <>. However, if you're not using ILM, you can downsample a TSDS manually. This guide shows you -how, using typical Kubernetes cluster monitoring data. +how, using typical Kubernetes cluster monitoring data. To test out manual downsampling, follow these steps: @@ -32,13 +32,13 @@ To test out manual downsampling, follow these steps: ==== Prerequisites * Refer to the <>. -* It is not possible to downsample a data stream directly, nor +* It is not possible to downsample a data stream directly, nor multiple indices at once. It's only possible to downsample one time series index (TSDS backing index). * In order to downsample an index, it needs to be read-only. For a TSDS write index, this means it needs to be rolled over and made read-only first. * Downsampling uses UTC timestamps. -* Downsampling needs at least one metric field to exist in the time series +* Downsampling needs at least one metric field to exist in the time series index. [discrete] @@ -51,8 +51,8 @@ First, you'll create a TSDS. For simplicity, in the time series mapping all be used. The `time_series_metric` values determine the kind of statistical representations that are used during downsampling. -The index template includes a set of static -<>: `host`, `namespace`, +The index template includes a set of static +<>: `host`, `namespace`, `node`, and `pod`. The time series dimensions are not changed by the downsampling process. @@ -388,6 +388,7 @@ This returns: // TESTRESPONSE[s/"ltOJGmqgTVm4T-Buoe7Acg"/$body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"2023-07-26T09:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.start/] // TESTRESPONSE[s/"2023-07-26T13:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.end/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_indices":[],"failure_store":false/] <1> The backing index for this data stream. 
Before a backing index can be downsampled, the TSDS needs to be rolled over and
diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc
new file mode 100644
index 0000000000000..6fa82dc2a810c
--- /dev/null
+++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc
@@ -0,0 +1,93 @@
+[[data-streams-get-lifecycle-stats]]
+=== Get data stream lifecycle stats
+++++
+Get Data Stream Lifecycle Stats
+++++
+
+preview::[]
+
+Gets stats about the execution of the data stream lifecycle.
+
+[[get-lifecycle-stats-api-prereqs]]
+==== {api-prereq-title}
+
+* If the {es} {security-features} are enabled, you must have the `monitor` or
+`manage` <> to use this API.
+
+[[data-streams-get-lifecycle-stats-request]]
+==== {api-request-title}
+
+`GET _lifecycle/stats`
+
+[[data-streams-get-lifecycle-stats-desc]]
+==== {api-description-title}
+
+Gets stats about the execution of the data stream lifecycle. The data stream level stats include only stats about data streams
+managed by the data stream lifecycle.
+
+[[get-lifecycle-stats-api-response-body]]
+==== {api-response-body-title}
+
+`last_run_duration_in_millis`::
+(Optional, long)
+The duration of the last data stream lifecycle execution.
+`time_between_starts_in_millis`::
+(Optional, long)
+The time passed between the start of the last two data stream lifecycle executions. This should amount approximately to
+<>.
+`data_streams_count`::
+(integer)
+The count of data streams currently being managed by the data stream lifecycle.
+`data_streams`::
+(array of objects)
+Contains information about the retrieved data stream lifecycles.
++
+.Properties of objects in `data_streams`
+[%collapsible%open]
+====
+`name`::
+(string)
+The name of the data stream.
+`backing_indices_in_total`::
+(integer)
+The count of the backing indices of this data stream that are managed by the data stream lifecycle.
+`backing_indices_in_error`::
+(integer)
+The count of the backing indices of this data stream that are managed by the data stream lifecycle and have encountered an error.
+==== + +[[data-streams-get-lifecycle-stats-example]] +==== {api-examples-title} + +Let's retrieve the data stream lifecycle stats of a cluster that has already executed the lifecycle more than once: + +[source,console] +-------------------------------------------------- +GET _lifecycle/stats?human&pretty +-------------------------------------------------- +// TEST[skip:this is for demonstration purposes only, we cannot ensure that DSL has run] + +The response will look like the following: + +[source,console-result] +-------------------------------------------------- +{ + "last_run_duration_in_millis": 2, + "last_run_duration": "2ms", + "time_between_starts_in_millis": 9998, + "time_between_starts": "9.99s", + "data_streams_count": 2, + "data_streams": [ + { + "name": "my-data-stream", + "backing_indices_in_total": 2, + "backing_indices_in_error": 0 + }, + { + "name": "my-other-stream", + "backing_indices_in_total": 2, + "backing_indices_in_error": 1 + } + ] +} +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index de11bbcfc2d4e..aa598b010badc 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -1,20 +1,36 @@ [role="xpack"] [[tutorial-migrate-data-stream-from-ilm-to-dsl]] -=== Tutorial: Migrate ILM managed data stream to Data stream lifecycle +=== Tutorial: Migrate ILM managed data stream to data stream lifecycle preview::[] -In this tutorial we'll look at migrating an existing data stream from {ilm-init} to -Data stream lifecycle. The existing {ilm-init} managed backing indices will continue +In this tutorial we'll look at migrating an existing data stream from Index Lifecycle Management ({ilm-init}) to +data stream lifecycle. The existing {ilm-init} managed backing indices will continue to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however, -the new backing indices will be managed by Data stream lifecycle. -This way, a data stream is gradually migrated away from being managed by {ilm-cap} to -being managed by Data stream lifecycle. As we'll see, {ilm-cap} and Data stream lifecycle +the new backing indices will be managed by data stream lifecycle. +This way, a data stream is gradually migrated away from being managed by {ilm-init} to +being managed by data stream lifecycle. As we'll see, {ilm-init} and data stream lifecycle can co-manage a data stream; however, an index can only be managed by one system at a time. -Let's first create a data stream with two backing indices managed by {ilm-cap}. -We first create an {ilm-cap} policy: +[discrete] +[[migrate-dsl-ilm-tldr]] +==== TL;DR +To migrate a data stream from {ilm-init} to data stream lifecycle we'll have to execute +two steps: + +1. Update the index template that's backing the data stream to set <> +to `false`, and to configure data stream lifecycle. +2. Configure the data stream lifecycle for the _existing_ data stream using +the <>. + +For more details see the <> section. + +[discrete] +[[setup-test-data]] +==== Setup ILM managed data stream +Let's first create a data stream with two backing indices managed by {ilm-init}. 
+We first create an {ilm-init} policy: [source,console] ---- @@ -40,7 +56,7 @@ PUT _ilm/policy/pre-dsl-ilm-policy } ---- -And let's create an index template that'll back the data stream and configures {ilm-cap}: +And let's create an index template that'll back the data stream and configures {ilm-init}: [source,console] ---- @@ -77,7 +93,7 @@ POST dsl-data-stream/_rollover ---- // TEST[continued] -We'll use the <> API to inspect the state of +We'll use the <> API to inspect the state of the data stream: [source,console] @@ -87,7 +103,7 @@ GET _data_stream/dsl-data-stream // TEST[continued] Inspecting the response we'll see that both backing indices are managed by {ilm-init} -and that the next generation index will also be managed by {ilm-init}: +and that the next generation index will also be managed by {ilm-init}: [source,console-result] ---- @@ -100,7 +116,7 @@ and that the next generation index will also be managed by {ilm-init}: }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", <1> + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", <1> "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", "prefer_ilm": true, <2> "ilm_policy": "pre-dsl-ilm-policy", <3> @@ -132,37 +148,40 @@ and that the next generation index will also be managed by {ilm-init}: // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The name of the backing index. -<2> For each backing index we display the value of the <> +<2> For each backing index we display the value of the <> configuration which will indicate if {ilm-init} takes precedence over data stream lifecycle in case both systems are configured for an index. -<3> The {ilm-ini} policy configured for this index. -<4> The system that manages this index (possible values are "Index Lifecycle Management", +<3> The {ilm-init} policy configured for this index. +<4> The system that manages this index (possible values are "Index Lifecycle Management", "Data stream lifecycle", or "Unmanaged") -<5> The system that will manage the next generation index (the new write index of this -data stream, once the data stream is rolled over). The possible values are +<5> The system that will manage the next generation index (the new write index of this +data stream, once the data stream is rolled over). The possible values are "Index Lifecycle Management", "Data stream lifecycle", or "Unmanaged". <6> The <> value configured in the index template that's backing the data stream. This value will be configured for all the new backing indices. If it's not configured in the index template the backing indices will receive the `true` -default value ({ilm-init} takes precedence over data stream lifecycle by default as it's +default value ({ilm-init} takes precedence over data stream lifecycle by default as it's currently richer in features). 
-<7> The {ilm-init} policy configured in the index template that's backing this data -stream (which will be configured on all the new backing indices, as long as it exists +<7> The {ilm-init} policy configured in the index template that's backing this data +stream (which will be configured on all the new backing indices, as long as it exists in the index template). +[discrete] +[[migrate-from-ilm-to-dsl]] +==== Migrate data stream to data stream lifecycle To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the index template to configure <> +1. Update the index template that's backing the data stream to set <> to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using the <>. IMPORTANT: The data stream lifecycle configuration that's added to the index template, -being a data stream configuration, will only apply to **new** data streams. +being a data stream configuration, will only apply to **new** data streams. Our data stream exists already, so even though we added a data stream lifecycle configuration in the index template it will not be applied to `dsl-data-stream`. @@ -192,13 +211,13 @@ PUT _index_template/dsl-data-stream-template <1> The `prefer_ilm` setting will now be configured on the **new** backing indices (created by rolling over the data stream) such that {ilm-init} does _not_ take -precedence over Data stream lifecycle. +precedence over data stream lifecycle. <2> We're configuring the data stream lifecycle so _new_ data streams will be -managed by Data stream lifecycle. +managed by data stream lifecycle. -We've now make sure that new data streams will be managed by Data stream lifecycle. +We've now made sure that new data streams will be managed by data stream lifecycle. 
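+
+Optionally, before touching the data stream itself, you can retrieve the
+template and double-check that the `prefer_ilm` setting and the `lifecycle`
+section are now in place:
+
+[source,console]
+----
+GET _index_template/dsl-data-stream-template
+----
+// TEST[continued]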
-Let's update our existing `dsl-data-stream` and configure Data stream lifecycle: +Let's update our existing `dsl-data-stream` and configure data stream lifecycle: [source,console] ---- @@ -210,7 +229,7 @@ PUT _data_stream/dsl-data-stream/_lifecycle // TEST[continued] We can inspect the data stream to check that the next generation will indeed be -managed by Data stream lifecycle: +managed by data stream lifecycle: [source,console] -------------------------------------------------- @@ -229,10 +248,10 @@ GET _data_stream/dsl-data-stream }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", - "prefer_ilm": true, - "ilm_policy": "pre-dsl-ilm-policy", + "prefer_ilm": true, + "ilm_policy": "pre-dsl-ilm-policy", "managed_by": "Index Lifecycle Management" <1> }, { @@ -250,7 +269,7 @@ GET _data_stream/dsl-data-stream "enabled": true, "data_retention": "7d" }, - "ilm_policy": "pre-dsl-ilm-policy", + "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", <3> "prefer_ilm": false, <4> "hidden": false, @@ -265,7 +284,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The existing backing index will continue to be managed by {ilm-init} <2> The existing backing index will continue to be managed by {ilm-init} @@ -274,7 +293,7 @@ GET _data_stream/dsl-data-stream and will be configured accordingly for new backing indices. 
We'll now rollover the data stream to see the new generation index being managed by -Data stream lifecycle: +data stream lifecycle: [source,console] ---- @@ -299,11 +318,11 @@ GET _data_stream/dsl-data-stream }, "indices": [ { - "index_name": ".ds-dsl-data-stream-2023.10.19-000001", + "index_name": ".ds-dsl-data-stream-2023.10.19-000001", "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg", - "prefer_ilm": true, - "ilm_policy": "pre-dsl-ilm-policy", - "managed_by": "Index Lifecycle Management" <1> + "prefer_ilm": true, + "ilm_policy": "pre-dsl-ilm-policy", + "managed_by": "Index Lifecycle Management" <1> }, { "index_name": ".ds-dsl-data-stream-2023.10.19-000002", @@ -327,9 +346,9 @@ GET _data_stream/dsl-data-stream "enabled": true, "data_retention": "7d" }, - "ilm_policy": "pre-dsl-ilm-policy", - "next_generation_managed_by": "Data stream lifecycle", - "prefer_ilm": false, + "ilm_policy": "pre-dsl-ilm-policy", + "next_generation_managed_by": "Data stream lifecycle", + "prefer_ilm": false, "hidden": false, "system": false, "allow_custom_routing": false, @@ -344,7 +363,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] <1> The backing indices that existed before rollover will continue to be managed by {ilm-init} <2> The backing indices that existed before rollover will continue to be managed by {ilm-init} @@ -352,27 +371,30 @@ GET _data_stream/dsl-data-stream in the index template <4> The new write index is managed by `Data stream lifecycle` -We can easily change this data stream to be managed by {ilm-cap} because we didn't remove -the {ilm-cap} policy when we <>. We can achieve this in two ways: 1. <> from the data streams -2. Disable Data stream lifecycle by configured the `enabled` flag to `false`. +2. Disable data stream lifecycle by configuring the `enabled` flag to `false`. 
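+
+For completeness, option 1 boils down to a single call to the delete lifecycle
+API. We show it here only as a sketch, since this tutorial continues with
+option 2:
+
+[source,console]
+----
+DELETE _data_stream/dsl-data-stream/_lifecycle
+----
+// TEST[skip:this tutorial implements option 2 instead]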
-Let's implement option 2 and disable the data stream lifecycle:
+Let's implement option 2 and disable the data stream lifecycle:

 [source,console]
 ----
 PUT _data_stream/dsl-data-stream/_lifecycle
 {
   "data_retention": "7d",
-  "enabled": false <1>
+  "enabled": false <1>
 }
 ----
 // TEST[continued]

-<1> The `enabled` flag can be ommitted and defaults to `true` however, here we
+<1> The `enabled` flag can be omitted and defaults to `true`; however, here we
 explicitly configure it to `false`.

 Let's check the state of the data stream:

@@ -393,23 +415,23 @@ GET _data_stream/dsl-data-stream
   },
   "indices": [
     {
-      "index_name": ".ds-dsl-data-stream-2023.10.19-000001",
+      "index_name": ".ds-dsl-data-stream-2023.10.19-000001",
       "index_uuid": "xCEhwsp8Tey0-FLNFYVwSg",
-      "prefer_ilm": true,
-      "ilm_policy": "pre-dsl-ilm-policy",
-      "managed_by": "Index Lifecycle Management"
+      "prefer_ilm": true,
+      "ilm_policy": "pre-dsl-ilm-policy",
+      "managed_by": "Index Lifecycle Management"
     },
     {
       "index_name": ".ds-dsl-data-stream-2023.10.19-000002",
       "index_uuid": "PA_JquKGSiKcAKBA8DJ5gw",
       "prefer_ilm": true,
       "ilm_policy": "pre-dsl-ilm-policy",
-      "managed_by": "Index Lifecycle Management"
+      "managed_by": "Index Lifecycle Management"
     },
     {
       "index_name": ".ds-dsl-data-stream-2023.10.19-000003",
       "index_uuid": "PA_JquKGSiKcAKBA8abcd1",
-      "prefer_ilm": false,
+      "prefer_ilm": false,
       "ilm_policy": "pre-dsl-ilm-policy",
       "managed_by": "Index Lifecycle Management" <1>
     }
@@ -421,9 +443,9 @@ GET _data_stream/dsl-data-stream
     "lifecycle": {
       "enabled": false, <2>
       "data_retention": "7d"
     },
-    "ilm_policy": "pre-dsl-ilm-policy",
-    "next_generation_managed_by": "Index Lifecycle Management", <3>
-    "prefer_ilm": false,
+    "ilm_policy": "pre-dsl-ilm-policy",
+    "next_generation_managed_by": "Index Lifecycle Management", <3>
+    "prefer_ilm": false,
     "hidden": false,
     "system": false,
     "allow_custom_routing": false,
@@ -438,14 +460,14 @@ GET _data_stream/dsl-data-stream
 // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/]
 // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/]
 // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/]
-// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/]
-<1> The write index is now managed by {ilm-cap}
-<2> The `lifecycle` configured on the data stream is now disabled.
-<3> The next write index will be managed by {ilm-cap}
-
-Had we removed the {ilm-cap} policy from the index template when we <>
-it, the write index of the data stream will now be `Unmanaged` because the index
-wouldn't have the {ilm-cap} policy configured to fallback onto.
+// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/]
+<1> The write index is now managed by {ilm-init}
+<2> The `lifecycle` configured on the data stream is now disabled.
+<3> The next write index will be managed by {ilm-init}
+
+Had we removed the {ilm-init} policy from the index template when we <>
+it, the write index of the data stream would now be `Unmanaged` because the index
+wouldn't have the {ilm-init} policy configured to fall back on.
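+
+TIP: Throughout this tutorial we inspected the full `GET _data_stream` response
+to see which system manages each index. When that's all you're after, you can
+trim the response with the `filter_path` query parameter:
+
+[source,console]
+----
+GET _data_stream/dsl-data-stream?filter_path=data_streams.indices.managed_by
+----
+// TEST[continued]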
////////////////////////// [source,console] diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index 9503b6b6bb29d..2c1a16c81d011 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -28,6 +28,8 @@ Every indexing operation in Elasticsearch is first resolved to a replication gro typically based on the document ID. Once the replication group has been determined, the operation is forwarded internally to the current _primary shard_ of the group. This stage of indexing is referred to as the _coordinating stage_. +image::images/data_processing_flow.png[An example of a basic write model.] + The next stage of indexing is the _primary stage_, performed on the primary shard. The primary shard is responsible for validating the operation and forwarding it to the other replicas. Since replicas can be offline, the primary is not required to replicate to all replicas. Instead, Elasticsearch maintains a list of shard copies that should diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc index 82831ef943398..e54825406257f 100644 --- a/docs/reference/esql/esql-get-started.asciidoc +++ b/docs/reference/esql/esql-get-started.asciidoc @@ -7,50 +7,14 @@ This guide shows how you can use {esql} to query and aggregate your data. -TIP: To get started with {esql} without setting up your own deployment, visit -the public {esql} demo environment at -https://esql.demo.elastic.co/[esql.demo.elastic.co]. It comes with preloaded -data sets and sample queries. - [discrete] [[esql-getting-started-prerequisites]] === Prerequisites -To follow along with the queries in this getting started guide, first ingest -some sample data using the following requests: - -[source,console] ----- -PUT sample_data -{ - "mappings": { - "properties": { - "client.ip": { - "type": "ip" - }, - "message": { - "type": "keyword" - } - } - } -} - -PUT sample_data/_bulk -{"index": {}} -{"@timestamp": "2023-10-23T12:15:03.360Z", "client.ip": "172.21.2.162", "message": "Connected to 10.1.0.3", "event.duration": 3450233} -{"index": {}} -{"@timestamp": "2023-10-23T12:27:28.948Z", "client.ip": "172.21.2.113", "message": "Connected to 10.1.0.2", "event.duration": 2764889} -{"index": {}} -{"@timestamp": "2023-10-23T13:33:34.937Z", "client.ip": "172.21.0.5", "message": "Disconnected", "event.duration": 1232382} -{"index": {}} -{"@timestamp": "2023-10-23T13:51:54.732Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 725448} -{"index": {}} -{"@timestamp": "2023-10-23T13:52:55.015Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 8268153} -{"index": {}} -{"@timestamp": "2023-10-23T13:53:55.832Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 5033755} -{"index": {}} -{"@timestamp": "2023-10-23T13:55:01.543Z", "client.ip": "172.21.3.15", "message": "Connected to 10.1.0.1", "event.duration": 1756467} ----- +To follow along with the queries in this guide, you can either set up your own +deployment, or use Elastic's public {esql} demo environment. 
+ +include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc[] [discrete] [[esql-getting-started-running-queries]] @@ -58,7 +22,7 @@ PUT sample_data/_bulk In {kib}, you can use Console or Discover to run {esql} queries: -include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget.asciidoc[] +include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc[] [discrete] [[esql-getting-started-first-query]] @@ -300,57 +264,9 @@ image::images/esql/esql-enrich.png[align="center"] Before you can use `ENRICH`, you first need to <> and <> -an <>. The following requests create and -execute a policy that links an IP address to an environment ("Development", -"QA", or "Production"): - -[source,console] ----- -PUT clientips -{ - "mappings": { - "properties": { - "client.ip": { - "type": "keyword" - }, - "env": { - "type": "keyword" - } - } - } -} - -PUT clientips/_bulk -{ "index" : {}} -{ "client.ip": "172.21.0.5", "env": "Development" } -{ "index" : {}} -{ "client.ip": "172.21.2.113", "env": "QA" } -{ "index" : {}} -{ "client.ip": "172.21.2.162", "env": "QA" } -{ "index" : {}} -{ "client.ip": "172.21.3.15", "env": "Production" } -{ "index" : {}} -{ "client.ip": "172.21.3.16", "env": "Production" } - -PUT /_enrich/policy/clientip_policy -{ - "match": { - "indices": "clientips", - "match_field": "client.ip", - "enrich_fields": ["env"] - } -} - -PUT /_enrich/policy/clientip_policy/_execute ----- - -//// -[source,console] ----- -DELETE /_enrich/policy/clientip_policy ----- -// TEST[continued] -//// +an <>. + +include::{es-repo-dir}/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc[] After creating and executing a policy, you can use it with the `ENRICH` command: diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 96103fc135271..f1971fd409754 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -57,6 +57,7 @@ include::processing-commands/limit.asciidoc[tag=limitation] ** `completion` ** `dense_vector` ** `double_range` +** `flattened` ** `float_range` ** `histogram` ** `integer_range` @@ -72,6 +73,35 @@ unsupported type is not explicitly used in a query, it is returned with `null` values, with the exception of nested fields. Nested fields are not returned at all. +[discrete] +[[esql-limitations-full-text-search]] +=== Full-text search is not supported + +Because of <>, +full-text search is not yet supported. Queries on `text` fields are like queries +on `keyword` fields: they are case-sensitive and need to match the full string. + +For example, after indexing a field of type `text` with the value `Elasticsearch +query language`, the following `WHERE` clause does not match because the `LIKE` +operator is case-sensitive: +[source,esql] +---- +| WHERE field LIKE "elasticsearch query language" +---- + +The following `WHERE` clause does not match either, because the `LIKE` operator +tries to match the whole string: +[source,esql] +---- +| WHERE field LIKE "Elasticsearch" +---- + +As a workaround, use wildcards and regular expressions. 
For example: +[source,esql] +---- +| WHERE field RLIKE "[Ee]lasticsearch.*" +---- + [discrete] [[esql-limitations-text-fields]] === `text` fields behave like `keyword` fields @@ -136,6 +166,33 @@ now() - 2023-10-26 include::esql-enrich-data.asciidoc[tag=limitations] +[discrete] +[[esql-limitations-dissect]] +=== Dissect limitations + +include::esql-process-data-with-dissect-grok.asciidoc[tag=dissect-limitations] + +[discrete] +[[esql-limitations-grok]] +=== Grok limitations + +include::esql-process-data-with-dissect-grok.asciidoc[tag=grok-limitations] + +[discrete] +[[esql-limitations-mv]] +=== Multivalue limitations + +{esql} <>, but functions +return `null` when applied to a multivalued field, unless documented otherwise. +Work around this limitation by converting the field to single value with one of +the <>. + +[discrete] +[[esql-limitations-timezone]] +=== Timezone support + +{esql} only supports the UTC timezone. + [discrete] [[esql-limitations-kibana]] === Kibana limitations diff --git a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc index a37989b2b2da8..a13633a9f8d92 100644 --- a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc +++ b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc @@ -62,8 +62,9 @@ clientip:keyword | @timestamp:keyword | status:keyword include::../ingest/processors/dissect.asciidoc[tag=intro-example-explanation] -An empty key `%{}` or a <> can be used to -match values, but exclude the value from the output. +A <> can be used to match values, but +exclude the value from the output. +// TODO: Change back to original text when https://github.com/elastic/elasticsearch/pull/102580 is merged All matched values are output as keyword string data types. Use the <> to convert to another data type. @@ -120,28 +121,86 @@ include::../ingest/processors/dissect.asciidoc[tag=dissect-key-modifiers] | `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <> | `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <> | `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. 
Same behavior as `%{}`| <>
-| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <>
 |======

 [[esql-dissect-modifier-skip-right-padding]]
 ====== Right padding modifier (`->`)
 include::../ingest/processors/dissect.asciidoc[tag=dissect-modifier-skip-right-padding]

+For example:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=dissectRightPaddingModifier]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=dissectRightPaddingModifier-result]
+|===
+
+////
+// TODO: Re-enable when https://github.com/elastic/elasticsearch/pull/102580 is merged
+include::../ingest/processors/dissect.asciidoc[tag=dissect-modifier-empty-right-padding]
+
+For example:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=dissectEmptyRightPaddingModifier]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=dissectEmptyRightPaddingModifier-result]
+|===
+////
+
 [[esql-append-modifier]]
 ====== Append modifier (`+`)
 include::../ingest/processors/dissect.asciidoc[tag=append-modifier]

+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=dissectAppendModifier]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=dissectAppendModifier-result]
+|===
+
 [[esql-append-order-modifier]]
 ====== Append with order modifier (`+` and `/n`)
 include::../ingest/processors/dissect.asciidoc[tag=append-order-modifier]

+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=dissectAppendWithOrderModifier]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=dissectAppendWithOrderModifier-result]
+|===
+
 [[esql-named-skip-key]]
 ====== Named skip key (`?`)
-include::../ingest/processors/dissect.asciidoc[tag=named-skip-key]
+// include::../ingest/processors/dissect.asciidoc[tag=named-skip-key]
+// TODO: Re-enable when https://github.com/elastic/elasticsearch/pull/102580 is merged
+
+Dissect supports ignoring matches in the final result. This can be done with a
+named skip key using the `%{?name}` syntax:

-[[esql-reference-keys]]
-====== Reference keys (`*` and `&`)
-include::../ingest/processors/dissect.asciidoc[tag=reference-keys]
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=dissectNamedSkipKey]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=dissectNamedSkipKey-result]
+|===
+
+[[esql-dissect-limitations]]
+===== Limitations
+
+// tag::dissect-limitations[]
+The `DISSECT` command does not support reference keys and empty keys.
+// end::dissect-limitations[]

 [[esql-process-data-with-grok]]
 ==== Process data with `GROK`
@@ -161,7 +220,14 @@ matches a log line of this format:
 1.2.3.4 [2023-01-23T12:15:00.000Z] Connected
 ----

-and results in adding the following columns to the input table:
+Putting it together as an {esql} query:
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=grokWithEscape]
+----
+
+`GROK` adds the following columns to the input table:

 [%header.monospaced.styled,format=dsv,separator=|]
 |===
@@ -169,6 +235,25 @@ and results in adding the following columns to the input table:
 2023-01-23T12:15:00.000Z | 1.2.3.4 | Connected
 |===

+[NOTE]
+====
+
+Special regex characters in grok patterns, like `[` and `]`, need to be escaped
+with a `\`.
+For example, in the earlier pattern:
+[source,txt]
+----
+%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status}
+----
+
+In {esql} queries, the backslash character itself is a special character that
+needs to be escaped with another `\`. For this example, the corresponding {esql}
+query becomes:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=grokWithEscape]
+----
+====
+
 [[esql-grok-patterns]]
 ===== Grok patterns
@@ -202,24 +287,6 @@ as well.
 Grok uses the Oniguruma regular expression library. Refer to
 https://github.com/kkos/oniguruma/blob/master/doc/RE[the Oniguruma GitHub
 repository] for the full supported regexp syntax.

-[NOTE]
-====
-Special regex characters like `[` and `]` need to be escaped with a `\`. For
-example, in the earlier pattern:
-[source,txt]
-----
-%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status}
-----
-
-In {esql} queries, the backslash character itself is a special character that
-needs to be escaped with another `\`. For this example, the corresponding {esql}
-query becomes:
-[source.merge.styled,esql]
-----
-include::{esql-specs}/docs.csv-spec[tag=grokWithEscape]
-----
-====
-
 [[esql-custom-patterns]]
 ===== Custom patterns
@@ -253,6 +320,8 @@ as the `GROK` command.
 [[esql-grok-limitations]]
 ===== Limitations

+// tag::grok-limitations[]
 The `GROK` command does not support configuring <>,
 or <>. The `GROK` command is not
 subject to <>.
+// end::grok-limitations[]
\ No newline at end of file
diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc
index 437871d31a88f..afa9ab7254cfa 100644
--- a/docs/reference/esql/esql-query-api.asciidoc
+++ b/docs/reference/esql/esql-query-api.asciidoc
@@ -68,11 +68,6 @@ responses. See <>.
 `query`::
 (Required, object) {esql} query to run. For syntax, refer to <>.

-[[esql-search-api-time-zone]]
-`time_zone`::
-(Optional, string) ISO-8601 time zone ID for the search. Several {esql}
-date/time functions use this time zone. Defaults to `Z` (UTC).
-
 [discrete]
 [role="child_attributes"]
 [[esql-query-api-response-body]]
diff --git a/docs/reference/esql/esql-syntax.asciidoc b/docs/reference/esql/esql-syntax.asciidoc
index 725b1d3ff1e03..22c9b1f100827 100644
--- a/docs/reference/esql/esql-syntax.asciidoc
+++ b/docs/reference/esql/esql-syntax.asciidoc
@@ -9,7 +9,7 @@
 [[esql-basic-syntax]]
 === Basic syntax

-An {esql} query is composed of a <> followed
+An {esql} query is composed of a <> followed
 by an optional series of <>,
 separated by a pipe character: `|`. For example:
@@ -36,6 +36,101 @@ source-command
 | processing-command1
 | processing-command2
 ----
 ====

+[discrete]
+[[esql-identifiers]]
+==== Identifiers
+
+Identifiers can be used as they are and don't require quoting, unless they
+contain special characters, in which case they must be quoted with
+backticks (+{backtick}+). What "special characters" means is command-dependent.
+
+For <>, <>, <>,
+<>, <> and
+<> these are: `=`, +{backtick}+, `,`, ` ` (space), `|`,
+`[`, `]`, `\t` (TAB), `\r` (CR), `\n` (LF); one `/` is allowed unquoted, but
+a sequence of two or more requires quoting.
+
+The rest of the commands - those allowing for identifiers to be used in
+expressions - require quoting if the identifier contains characters other than
+letters, numbers and `_`, or doesn't start with a letter, `_` or `@`.
+
+For instance:
+
+[source,esql]
+----
+// Retain just one field
+FROM index
+| KEEP 1.field
+----
+
+is legal.
+However, if the same field is to be used with an <>,
+it'd have to be quoted:
+
+[source,esql]
+----
+// Copy one field
+FROM index
+| EVAL my_field = `1.field`
+----
+
+[discrete]
+[[esql-literals]]
+==== Literals
+
+{esql} currently supports numeric and string literals.
+
+[discrete]
+[[esql-string-literals]]
+===== String literals
+
+A string literal is a sequence of Unicode characters delimited by double
+quotes (`"`).
+
+[source,esql]
+----
+// Filter by a string value
+FROM index
+| WHERE first_name == "Georgi"
+----
+
+If the literal string itself contains quotes, these need to be escaped (`\\"`).
+{esql} also supports the triple-quotes (`"""`) delimiter, for convenience:
+
+[source,esql]
+----
+ROW name = """Indiana "Indy" Jones"""
+----
+
+The special characters CR, LF and TAB can be provided with the usual escaping:
+`\r`, `\n`, `\t`, respectively.
+
+[discrete]
+[[esql-numeric-literals]]
+===== Numerical literals
+
+The numeric literals are accepted in decimal and in the scientific notation
+with the exponent marker (`e` or `E`), starting either with a digit, decimal
+point `.` or the negative sign `-`:
+
+[source, sql]
+----
+1969    -- integer notation
+3.14    -- decimal notation
+.1234   -- decimal notation starting with decimal point
+4E5     -- scientific notation (with exponent marker)
+1.2e-3  -- scientific notation with decimal point
+-.1e2   -- scientific notation starting with the negative sign
+----
+
+The integer numeric literals are implicitly converted to the `integer`, `long`
+or the `double` type, whichever can first accommodate the literal's value.
+
+The floating point literals are implicitly converted to the `double` type.
+
+To obtain constant values of different types, use one of the numeric
+<>.
+
+
 [discrete]
 [[esql-comments]]
 ==== Comments
diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc
index dbab521ead4d1..235c7defe559b 100644
--- a/docs/reference/esql/esql-using.asciidoc
+++ b/docs/reference/esql/esql-using.asciidoc
@@ -9,8 +9,8 @@ Using {esql} in {kib} to query and aggregate your data, create visualizations,
 and set up alerts.

 <>::
-Using {esql} in {elastic-sec} to investigate events in Timeline and create
-detection rules.
+Using {esql} in {elastic-sec} to investigate events in Timeline, create
+detection rules, and build {esql} queries using Elastic AI Assistant.

 <>::
 Using the <> to list and cancel {esql} queries.
include::esql-rest.asciidoc[] include::esql-kibana.asciidoc[] include::esql-security-solution.asciidoc[] -include::task-management.asciidoc[] \ No newline at end of file +include::task-management.asciidoc[] diff --git a/docs/reference/esql/functions/in.asciidoc b/docs/reference/esql/functions/in.asciidoc index be5688250ecc7..c64c64873f7cb 100644 --- a/docs/reference/esql/functions/in.asciidoc +++ b/docs/reference/esql/functions/in.asciidoc @@ -2,10 +2,16 @@ [[esql-in-operator]] === `IN` +//tag::body[] The `IN` operator allows testing whether a field or expression equals an element in a list of literals, fields or expressions: -[source,esql] +[source.merge.styled,esql] ---- include::{esql-specs}/row.csv-spec[tag=in-with-expressions] ----- \ No newline at end of file +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/row.csv-spec[tag=in-with-expressions-result] +|=== +//end::body[] \ No newline at end of file diff --git a/docs/reference/esql/functions/like.asciidoc b/docs/reference/esql/functions/like.asciidoc index 9d06a3d051b93..d89b6715f86eb 100644 --- a/docs/reference/esql/functions/like.asciidoc +++ b/docs/reference/esql/functions/like.asciidoc @@ -2,6 +2,7 @@ [[esql-like-operator]] === `LIKE` +// tag::body[] Use `LIKE` to filter data based on string patterns using wildcards. `LIKE` usually acts on a field placed on the left-hand side of the operator, but it can also act on a constant (literal) expression. The right-hand side of the operator @@ -12,9 +13,12 @@ The following wildcard characters are supported: * `*` matches zero or more characters. * `?` matches one character. -[source,esql] +[source.merge.styled,esql] ---- -FROM employees -| WHERE first_name LIKE "?b*" -| KEEP first_name, last_name +include::{esql-specs}/docs.csv-spec[tag=like] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=like-result] +|=== +// end::body[] \ No newline at end of file diff --git a/docs/reference/esql/functions/ltrim.asciidoc b/docs/reference/esql/functions/ltrim.asciidoc index 6e6d30a73b865..e5230e4edd41a 100644 --- a/docs/reference/esql/functions/ltrim.asciidoc +++ b/docs/reference/esql/functions/ltrim.asciidoc @@ -1,6 +1,9 @@ [discrete] [[esql-ltrim]] === `LTRIM` +[.text-center] +image::esql/functions/signature/ltrim.svg[Embedded,opts=inline] + Removes leading whitespaces from strings. [source.merge.styled,esql] @@ -11,3 +14,7 @@ include::{esql-specs}/string.csv-spec[tag=ltrim] |=== include::{esql-specs}/string.csv-spec[tag=ltrim-result] |=== + +Supported types: + +include::types/rtrim.asciidoc[] diff --git a/docs/reference/esql/functions/pow.asciidoc b/docs/reference/esql/functions/pow.asciidoc index 9f7805bfd3eae..b13151c8cbd76 100644 --- a/docs/reference/esql/functions/pow.asciidoc +++ b/docs/reference/esql/functions/pow.asciidoc @@ -5,7 +5,8 @@ image::esql/functions/signature/pow.svg[Embedded,opts=inline] Returns the value of a base (first argument) raised to the power of an exponent (second argument). -Both arguments must be numeric. +Both arguments must be numeric. The output is always a double. Note that it is still possible to overflow +a double result here; in that case, null will be returned. [source.merge.styled,esql] ---- @@ -16,62 +17,6 @@ include::{esql-specs}/math.csv-spec[tag=powDI] include::{esql-specs}/math.csv-spec[tag=powDI-result] |=== -[discrete] -==== Type rules - -The type of the returned value is determined by the types of the base and exponent. 
-The following rules are applied to determine the result type:
-
-* If either of the base or exponent are of a floating point type, the result will be a double
-* Otherwise, if either the base or the exponent are 64-bit (long or unsigned long), the result will be a long
-* Otherwise, the result will be a 32-bit integer (this covers all other numeric types, including int, short and byte)
-
-For example, using simple integers as arguments will lead to an integer result:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/math.csv-spec[tag=powII]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/math.csv-spec[tag=powII-result]
-|===
-
-NOTE: The actual power function is performed using double precision values for all cases.
-This means that for very large non-floating point values there is a small chance that the
-operation can lead to slightly different answers than expected.
-However, a more likely outcome of very large non-floating point values is numerical overflow.
-
-[discrete]
-==== Arithmetic errors
-
-Arithmetic errors and numeric overflow do not result in an error. Instead, the result will be `null`
-and a warning for the `ArithmeticException` added.
-For example:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/math.csv-spec[tag=powULOverrun]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/math.csv-spec[tag=powULOverrun-warning]
-|===
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/math.csv-spec[tag=powULOverrun-result]
-|===
-
-If it is desired to protect against numerical overruns, use `TO_DOUBLE` on either of the arguments:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/math.csv-spec[tag=pow2d]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/math.csv-spec[tag=pow2d-result]
-|===

 [discrete]
 ==== Fractional exponents
diff --git a/docs/reference/esql/functions/predicates.asciidoc b/docs/reference/esql/functions/predicates.asciidoc
index 9a3ea89e9aa73..16b461b40ebf7 100644
--- a/docs/reference/esql/functions/predicates.asciidoc
+++ b/docs/reference/esql/functions/predicates.asciidoc
@@ -2,6 +2,7 @@
 [[esql-predicates]]
 === `IS NULL` and `IS NOT NULL` predicates

+//tag::body[]
 For NULL comparison, use the `IS NULL` and `IS NOT NULL` predicates:

 [source.merge.styled,esql]
@@ -21,3 +22,4 @@ include::{esql-specs}/null.csv-spec[tag=is-not-null]
 |===
 include::{esql-specs}/null.csv-spec[tag=is-not-null-result]
 |===
+//end::body[]
\ No newline at end of file
diff --git a/docs/reference/esql/functions/rlike.asciidoc b/docs/reference/esql/functions/rlike.asciidoc
index 0fd8d8ab319da..1cdbbe6964123 100644
--- a/docs/reference/esql/functions/rlike.asciidoc
+++ b/docs/reference/esql/functions/rlike.asciidoc
@@ -2,14 +2,18 @@
 [[esql-rlike-operator]]
 ==== `RLIKE`

+// tag::body[]
 Use `RLIKE` to filter data based on string patterns using
 <>. `RLIKE` usually acts on a field placed on
 the left-hand side of the operator, but it can also act on a constant (literal)
 expression. The right-hand side of the operator
 represents the pattern.
-[source,esql] +[source.merge.styled,esql] ---- -FROM employees -| WHERE first_name RLIKE ".leja.*" -| KEEP first_name, last_name ----- \ No newline at end of file +include::{esql-specs}/docs.csv-spec[tag=rlike] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=rlike-result] +|=== +// end::body[] \ No newline at end of file diff --git a/docs/reference/esql/functions/rtrim.asciidoc b/docs/reference/esql/functions/rtrim.asciidoc index 3224331e9ed6a..8eb0494e90d9e 100644 --- a/docs/reference/esql/functions/rtrim.asciidoc +++ b/docs/reference/esql/functions/rtrim.asciidoc @@ -1,6 +1,9 @@ [discrete] [[esql-rtrim]] === `RTRIM` +[.text-center] +image::esql/functions/signature/rtrim.svg[Embedded,opts=inline] + Removes trailing whitespaces from strings. [source.merge.styled,esql] @@ -11,3 +14,7 @@ include::{esql-specs}/string.csv-spec[tag=rtrim] |=== include::{esql-specs}/string.csv-spec[tag=rtrim-result] |=== + +Supported types: + +include::types/rtrim.asciidoc[] diff --git a/docs/reference/esql/functions/signature/case.svg b/docs/reference/esql/functions/signature/case.svg deleted file mode 100644 index 09e8f7efa2835..0000000000000 --- a/docs/reference/esql/functions/signature/case.svg +++ /dev/null @@ -1 +0,0 @@ -CASE(arg1,arg2) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/ltrim.svg b/docs/reference/esql/functions/signature/ltrim.svg index ad7a4da0248e6..327e75b92ca19 100644 --- a/docs/reference/esql/functions/signature/ltrim.svg +++ b/docs/reference/esql/functions/signature/ltrim.svg @@ -1 +1 @@ -LTRIM(arg1) \ No newline at end of file +LTRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/rtrim.svg b/docs/reference/esql/functions/signature/rtrim.svg index 3d95ddf5ef6ef..b830bb59c5c31 100644 --- a/docs/reference/esql/functions/signature/rtrim.svg +++ b/docs/reference/esql/functions/signature/rtrim.svg @@ -1 +1 @@ -RTRIM(arg1) \ No newline at end of file +RTRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/trim.svg b/docs/reference/esql/functions/signature/trim.svg index 6f1273142fa51..5fc865d306f11 100644 --- a/docs/reference/esql/functions/signature/trim.svg +++ b/docs/reference/esql/functions/signature/trim.svg @@ -1 +1 @@ -TRIM(arg1) \ No newline at end of file +TRIM(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/starts_with.asciidoc b/docs/reference/esql/functions/starts_with.asciidoc index 38cee79ea63f8..f98a76ef68206 100644 --- a/docs/reference/esql/functions/starts_with.asciidoc +++ b/docs/reference/esql/functions/starts_with.asciidoc @@ -2,7 +2,7 @@ [[esql-starts_with]] === `STARTS_WITH` [.text-center] -image::esql/functions/signature/ends_with.svg[Embedded,opts=inline] +image::esql/functions/signature/starts_with.svg[Embedded,opts=inline] Returns a boolean that indicates whether a keyword string starts with another string: diff --git a/docs/reference/esql/functions/types/ltrim.asciidoc b/docs/reference/esql/functions/types/ltrim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/ltrim.asciidoc +++ b/docs/reference/esql/functions/types/ltrim.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/esql/functions/types/pow.asciidoc b/docs/reference/esql/functions/types/pow.asciidoc index 37bddc60c118f..0e22c123ebf53 100644 --- 
a/docs/reference/esql/functions/types/pow.asciidoc +++ b/docs/reference/esql/functions/types/pow.asciidoc @@ -3,8 +3,18 @@ base | exponent | result double | double | double double | integer | double +double | long | double +double | unsigned_long | double integer | double | double -integer | integer | integer +integer | integer | double +integer | long | double +integer | unsigned_long | double long | double | double -long | integer | long +long | integer | double +long | long | double +long | unsigned_long | double +unsigned_long | double | double +unsigned_long | integer | double +unsigned_long | long | double +unsigned_long | unsigned_long | double |=== diff --git a/docs/reference/esql/functions/types/rtrim.asciidoc b/docs/reference/esql/functions/types/rtrim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/rtrim.asciidoc +++ b/docs/reference/esql/functions/types/rtrim.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/esql/functions/types/trim.asciidoc b/docs/reference/esql/functions/types/trim.asciidoc index 11c02c8f0c3bb..26f4e7633d8ae 100644 --- a/docs/reference/esql/functions/types/trim.asciidoc +++ b/docs/reference/esql/functions/types/trim.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +str | result keyword | keyword text | text |=== diff --git a/docs/reference/esql/processing-commands/where.asciidoc b/docs/reference/esql/processing-commands/where.asciidoc index e723a977bf99c..973b163b08b10 100644 --- a/docs/reference/esql/processing-commands/where.asciidoc +++ b/docs/reference/esql/processing-commands/where.asciidoc @@ -19,9 +19,6 @@ A boolean expression. The `WHERE` processing command produces a table that contains all the rows from the input table for which the provided condition evaluates to `true`. -`WHERE` supports various <> and -<>. - *Examples* [source,esql] @@ -36,9 +33,22 @@ Which, if `still_hired` is a boolean field, can be simplified to: include::{esql-specs}/docs.csv-spec[tag=whereBoolean] ---- -Using a function: +`WHERE` supports various <>. For example the +<> function: [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=whereFunction] ---- + +For a complete list of all functions, refer to <>. + +include::../functions/predicates.asciidoc[tag=body] + +include::../functions/like.asciidoc[tag=body] + +include::../functions/rlike.asciidoc[tag=body] + +include::../functions/in.asciidoc[tag=body] + +For a complete list of all operators, refer to <>. \ No newline at end of file diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc new file mode 100644 index 0000000000000..3e474953a72f9 --- /dev/null +++ b/docs/reference/getting-started.asciidoc @@ -0,0 +1,285 @@ +[chapter] +[[getting-started]] += Quick start + +This guide helps you learn how to: + +* install and run {es} and {kib} (using {ecloud} or Docker), +* add simple (non-timestamped) dataset to {es}, +* run basic searches. + +[TIP] +==== +If you're interested in using {es} with Python, check out Elastic Search Labs. This is the best place to explore AI-powered search use cases, such as working with embeddings, vector search, and retrieval augmented generation (RAG). + +* https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[Tutorial]: this walks you through building a complete search solution with {es}, from the ground up. 
+* https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs` repository]: it contains a range of Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps]. +==== + +[discrete] +[[run-elasticsearch]] +=== Run {es} + +The simplest way to set up {es} is to create a managed deployment with {ess} on +{ecloud}. If you prefer to manage your own test environment, install and +run {es} using Docker. + +include::{es-repo-dir}/tab-widgets/code.asciidoc[] +include::{es-repo-dir}/tab-widgets/quick-start-install-widget.asciidoc[] + +[discrete] +[[send-requests-to-elasticsearch]] +=== Send requests to {es} + +You send data and other requests to {es} using REST APIs. This lets you interact +with {es} using any client that sends HTTP requests, such as +https://curl.se[curl]. You can also use {kib}'s Console to send requests to +{es}. + +include::{es-repo-dir}/tab-widgets/api-call-widget.asciidoc[] + +[discrete] +[[add-data]] +=== Add data + +You add data to {es} as JSON objects called documents. {es} stores these +documents in searchable indices. + +[discrete] +[[add-single-document]] +==== Add a single document + +Submit the following indexing request to add a single document to the +`books` index. +The request automatically creates the index. + +//// +[source,console] +---- +PUT books +---- +// TESTSETUP +//// + +[source,console] +---- +POST books/_doc +{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470} +---- +// TEST[s/_doc/_doc?refresh=wait_for/] + +The response includes metadata that {es} generates for the document including a unique `_id` for the document within the index. + +.Expand to see example response +[%collapsible] +=============== +[source,console-result] +---- +{ + "_index": "books", + "_id": "O0lG2IsBaSa7VYx_rEia", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 0, + "_primary_term": 1 +} +---- +// TEST[skip:TODO] +=============== + +[discrete] +[[add-multiple-documents]] +==== Add multiple documents + +Use the `_bulk` endpoint to add multiple documents in one request. Bulk data +must be newline-delimited JSON (NDJSON). Each line must end in a newline +character (`\n`), including the last line. + +[source,console] +---- +POST /_bulk +{ "index" : { "_index" : "books" } } +{"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585} +{ "index" : { "_index" : "books" } } +{"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328} +{ "index" : { "_index" : "books" } } +{"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227} +{ "index" : { "_index" : "books" } } +{"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268} +{ "index" : { "_index" : "books" } } +{"name": "The Handmaids Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311} +---- +// TEST[continued] + +You should receive a response indicating there were no errors. 
+ +.Expand to see example response +[%collapsible] +=============== +[source,console-result] +---- +{ + "errors": false, + "took": 29, + "items": [ + { + "index": { + "_index": "books", + "_id": "QklI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 1, + "_primary_term": 1, + "status": 201 + } + }, + { + "index": { + "_index": "books", + "_id": "Q0lI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 2, + "_primary_term": 1, + "status": 201 + } + }, + { + "index": { + "_index": "books", + "_id": "RElI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 3, + "_primary_term": 1, + "status": 201 + } + }, + { + "index": { + "_index": "books", + "_id": "RUlI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 4, + "_primary_term": 1, + "status": 201 + } + }, + { + "index": { + "_index": "books", + "_id": "RklI2IsBaSa7VYx_Qkh-", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "_seq_no": 5, + "_primary_term": 1, + "status": 201 + } + } + ] +} +---- +// TEST[skip:TODO] +=============== + +[discrete] +[[qs-search-data]] +=== Search data + +Indexed documents are available for search in near real-time. + +[discrete] +[[search-all-documents]] +==== Search all documents + +Run the following command to search the `books` index for all documents: +[source,console] +---- +GET books/_search +---- +// TEST[continued] + +The `_source` of each hit contains the original +JSON object submitted during indexing. + +[discrete] +[[qs-match-query]] +==== `match` query + +You can use the `match` query to search for documents that contain a specific value in a specific field. +This is the standard query for performing full-text search, including fuzzy matching and phrase searches. + +Run the following command to search the `books` index for documents containing `brave` in the `name` field: +[source,console] +---- +GET books/_search +{ + "query": { + "match": { + "name": "brave" + } + } +} +---- +// TEST[continued] + +[discrete] +[[whats-next]] +=== Next steps + +Now that {es} is up and running and you've learned the basics, you'll probably want to test out larger datasets, or index your own data. + +[discrete] +[[whats-next-search-learn-more]] +==== Learn more about search queries + +* <>. Jump here to learn about exact value search, full-text search, vector search, and more, using the <>. + +[discrete] +[[whats-next-more-data]] +==== Add more data + +* Learn how to {kibana-ref}/sample-data.html[install sample data] using {kib}. This is a quick way to test out {es} on larger workloads. +* Learn how to use the {kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[upload data UI] in {kib} to add your own CSV, TSV, or JSON files. +* Use the https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[bulk API] to ingest your own datasets to {es}. + +[discrete] +[[whats-next-client-libraries]] +==== {es} programming language clients + +* Check out our https://www.elastic.co/guide/en/elasticsearch/client/index.html[client library] to work with your {es} instance in your preferred programming language. 
+* If you're using Python, check out https://www.elastic.co/search-labs[Elastic Search Labs] for a range of examples that use the {es} Python client. This is the best place to explore AI-powered search use cases, such as working with embeddings, vector search, and retrieval augmented generation (RAG). +** This extensive, hands-on https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[tutorial] +walks you through building a complete search solution with {es}, from the ground up. +** https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs`] contains a range of executable Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps]. \ No newline at end of file diff --git a/docs/reference/images/data_processing_flow.png b/docs/reference/images/data_processing_flow.png new file mode 100644 index 0000000000000..9b2f58ad61166 Binary files /dev/null and b/docs/reference/images/data_processing_flow.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 4f15bb1c1d694..31fe747feb63b 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -16,7 +16,10 @@ Index level settings can be set per-index. Settings may be: _static_:: They can only be set at index creation time or on a -<>. +<>, or by using the +<> API with the +`reopen` query parameter set to `true` (which automatically +closes and reopens impacted indices). _dynamic_:: diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 828a3e4d1d01d..b09d67e990636 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -17,6 +17,8 @@ include::intro.asciidoc[] include::release-notes/highlights.asciidoc[] +include::getting-started.asciidoc[] + include::setup.asciidoc[] include::upgrade.asciidoc[] diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 1faee74ae953c..7701aa9f64cfe 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -156,8 +156,8 @@ Universally unique identifier (UUID) for the index. `prefer_ilm`:: (boolean) -Functionality in preview:[]. Indicates if this index is configured to prefer {ilm} -when both {ilm-cap} and <> are configured to +Functionality in preview:[]. Indicates if this index is configured to prefer {ilm} +when both {ilm-cap} and <> are configured to manage this index. `managed_by`:: @@ -223,8 +223,8 @@ Functionality in preview:[]. Indicates the system that will managed the next gen `prefer_ilm`:: (boolean) -Functionality in preview:[]. Indicates if the index template used to create the data -stream's backing indices is configured to prefer {ilm-cap} when both {ilm-cap} and +Functionality in preview:[]. Indicates if the index template used to create the data +stream's backing indices is configured to prefer {ilm-cap} when both {ilm-cap} and <> are configured to manage this index. 
`hidden`:: @@ -351,3 +351,4 @@ The API returns the following response: // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-two-2099.03.08-000001"/"index_name": $body.data_streams.1.indices.0.index_name/] // TESTRESPONSE[s/"index_uuid": "3liBu2SYS5axasRt6fUIpA"/"index_uuid": $body.data_streams.1.indices.0.index_uuid/] // TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_indices":[],"failure_store":false/] diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index 45531dd58ccfc..1ac9ecbb6a6a3 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -60,6 +60,16 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailab (Optional, Boolean) If `true`, existing index settings remain unchanged. Defaults to `false`. +`reopen`:: +(Optional, Boolean) If `true`, any static settings that would ordinarily only +be updated on closed indices are updated by automatically closing and reopening +the affected indices. If `false`, attempts to update static settings on open indices +will fail. Defaults to `false`. + +NOTE: Changing index settings with the `reopen` parameter makes each +affected index briefly unavailable while it is automatically closed and +reopened. + include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index f26a73d093091..f8515a8b33c39 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -25,9 +25,9 @@ Performs an inference task on an input text by using an {infer} model. [[post-inference-api-desc]] ==== {api-description-title} -The perform {infer} API enables you to use {infer} models to perform specific -tasks on data that you provide as an input. The API returns a response with the -resutls of the tasks. The {infer} model you use can perform one specific task +The perform {infer} API enables you to use {infer} models to perform specific +tasks on data that you provide as an input. The API returns a response with the +results of the tasks. The {infer} model you use can perform one specific task that has been defined when the model was created with the <>. @@ -50,8 +50,9 @@ The type of {infer} task that the model performs. == {api-request-body-title} `input`:: -(Required, string) +(Required, array of strings) The text on which you want to perform the {infer} task. +`input` can be a single string or an array.
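+ +For example, the following request (which assumes an existing `sparse_embedding` {infer} model with the hypothetical ID `my-elser-model`) passes an array of two strings as the `input`. The response contains one embedding per array element: + +[source,console] +------------------------------------------------------------ +POST _inference/sparse_embedding/my-elser-model +{ + "input": ["The sky above the port", "The color of television"] +} +------------------------------------------------------------ +// TEST[skip:model ID is hypothetical]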
[discrete] @@ -77,23 +78,26 @@ The API returns the following response: [source,console-result] ------------------------------------------------------------ { - "sparse_embedding": { - "port": 2.1259406, - "sky": 1.7073475, - "color": 1.6922266, - "dead": 1.6247464, - "television": 1.3525393, - "above": 1.2425821, - "tuned": 1.1440028, - "colors": 1.1218185, - "tv": 1.0111054, - "ports": 1.0067928, - "poem": 1.0042328, - "channel": 0.99471164, - "tune": 0.96235967, - "scene": 0.9020516, + "sparse_embedding": [ + { + "port": 2.1259406, + "sky": 1.7073475, + "color": 1.6922266, + "dead": 1.6247464, + "television": 1.3525393, + "above": 1.2425821, + "tuned": 1.1440028, + "colors": 1.1218185, + "tv": 1.0111054, + "ports": 1.0067928, + "poem": 1.0042328, + "channel": 0.99471164, + "tune": 0.96235967, + "scene": 0.9020516, + (...) + }, (...) - } + ] } ------------------------------------------------------------ -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 3b8cd19aded53..9f0539fb551cb 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -52,18 +52,67 @@ The type of the {infer} task that the model will perform. Available task types: (Required, string) The type of service supported for the specified task type. Available services: -* `elser` +* `elser` +* `openai` `service_settings`:: (Required, object) Settings used to install the {infer} model. These settings are specific to the `service` you specified. ++ +.`service_settings` for `elser` +[%collapsible%closed] +===== +`num_allocations`::: +(Required, integer) +The number of model allocations to create. + +`num_threads`::: +(Required, integer) +The number of threads used by each model allocation. +===== ++ +.`service_settings` for `openai` +[%collapsible%closed] +===== +`api_key`::: +(Required, string) +A valid API key for your OpenAI account. You can find your OpenAI API keys in +your OpenAI account under the +https://platform.openai.com/api-keys[API keys section]. + +IMPORTANT: You need to provide the API key only once, during the {infer} model +creation. The <> does not retrieve your API key. After +creating the {infer} model, you cannot change the associated API key. If you +want to use a different API key, delete the {infer} model and recreate it with +the same name and the updated API key. + +`organization_id`::: +(Optional, string) +The unique identifier of your organization. You can find the Organization ID in +your OpenAI account under +https://platform.openai.com/account/organization[**Settings** > **Organizations**]. + +`url`::: +(Optional, string) +The URL endpoint to use for the requests. Can be changed for testing purposes. +Defaults to `https://api.openai.com/v1/embeddings`. +===== `task_settings`:: (Optional, object) Settings to configure the {infer} task. These settings are specific to the `<task_type>` you specified. - ++ +.`task_settings` for `text_embedding` +[%collapsible%closed] +===== +`model`::: +(Optional, string) +The name of the model to use for the {infer} task. Refer to the +https://platform.openai.com/docs/guides/embeddings/what-are-embeddings[OpenAI documentation] +for the list of available text embedding models.
+===== [discrete] [[put-inference-api-example]] @@ -103,3 +152,22 @@ Example response: } ------------------------------------------------------------ // NOTCONSOLE + + +The following example shows how to create an {infer} model called +`openai_embeddings` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/openai_embeddings +{ + "service": "openai", + "service_settings": { + "api_key": "" + }, + "task_settings": { + "model": "text-embedding-ada-002" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc index ee3af9c21de8f..ebad9f09250d3 100644 --- a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc @@ -37,8 +37,9 @@ PUT /_enrich/policy/my-policy [source,console] -------------------------------------------------- -PUT /_enrich/policy/my-policy/_execute +PUT /_enrich/policy/my-policy/_execute?wait_for_completion=false -------------------------------------------------- +// TEST[s/\?wait_for_completion=false//] //// [source,console] @@ -93,8 +94,13 @@ The previous enrich index will be deleted by a delayed maintenance job. By default, this is done every 15 minutes. // end::update-enrich-index[] -Because this API request performs several operations, -it may take a while to return a response. +By default, this API is synchronous: it returns when the policy has been executed. +Because executing a policy performs several operations, it may take a while to +return a response, especially when the source indices are large. This can lead +to timeouts. To prevent timeouts, set the `wait_for_completion` parameter to +`false`. This runs the request asynchronously in the background and returns a +task ID. You can use the task ID to manage the request with the <>. [[execute-enrich-policy-api-path-params]] ==== {api-path-parms-title} @@ -107,6 +113,7 @@ Enrich policy to execute. ==== {api-query-parms-title} `wait_for_completion`:: -(Required, Boolean) -If `true`, the request blocks other enrich policy execution requests until -complete. Defaults to `true`. +(Optional, Boolean) +If `true`, the request blocks until execution is complete. If `false`, the +request returns immediately and execution runs asynchronously in the background. +Defaults to `true`. diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index 772c35d542c2f..04fcd500a9721 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -29,3 +29,4 @@ include::delete-pipeline.asciidoc[] include::geoip-stats-api.asciidoc[] include::get-pipeline.asciidoc[] include::simulate-pipeline.asciidoc[] +include::simulate-ingest.asciidoc[] diff --git a/docs/reference/ingest/apis/simulate-ingest.asciidoc b/docs/reference/ingest/apis/simulate-ingest.asciidoc new file mode 100644 index 0000000000000..36f1f089ce90e --- /dev/null +++ b/docs/reference/ingest/apis/simulate-ingest.asciidoc @@ -0,0 +1,361 @@ + +[[simulate-ingest-api]] +=== Simulate ingest API +++++ +Simulate ingest +++++ + +Executes ingest pipelines against a set of provided documents, optionally +with substitute pipeline definitions.
This API is meant to be used for +troubleshooting or pipeline development, as it does not actually index any +data into {es}. + +//// +[source,console] +---- +PUT /_ingest/pipeline/my-pipeline +{ + "description" : "example pipeline to simulate", + "processors": [ + { + "set" : { + "field" : "field1", + "value" : "value1" + } + } + ] +} + +PUT /_ingest/pipeline/my-final-pipeline +{ + "description" : "example final pipeline to simulate", + "processors": [ + { + "set" : { + "field" : "field2", + "value" : "value2" + } + } + ] +} + +PUT /my-index +{ + "settings": { + "index": { + "default_pipeline": "my-pipeline", + "final_pipeline": "my-final-pipeline" + } + } +} +---- +// TESTSETUP +//// + +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { <1> + "my-pipeline": { + "processors": [ + { + "set": { + "field": "field3", + "value": "value3" + } + } + ] + } + } +} +---- + +<1> This replaces the existing `my-pipeline` pipeline with the contents given here for the duration of this request. + +[[simulate-ingest-api-request]] +==== {api-request-title} + +`POST /_ingest/_simulate` + +`GET /_ingest/_simulate` + +`POST /_ingest/<target>/_simulate` + +`GET /_ingest/<target>/_simulate` + +[[simulate-ingest-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`index` or `create` <> +to use this API. + +[[simulate-ingest-api-desc]] +==== {api-description-title} + +The simulate ingest API simulates ingesting data into an index. It +executes the default and final pipeline for that index against a set +of documents provided in the body of the request. If a pipeline +contains a <>, it follows that +reroute processor to the new index, executing that index's pipelines +as well, just as a non-simulated ingest would. No data is +indexed into {es}. Instead, the transformed document is returned, +along with the list of pipelines that have been executed and the name +of the index where the document would have been indexed if this were +not a simulation. This differs from the +<> in that you specify a +single pipeline for that API, and it only runs that one pipeline. The +simulate pipeline API is more useful for developing a single pipeline, +while the simulate ingest API is more useful for troubleshooting the +interaction of the various pipelines that get applied when ingesting +into an index. + + +By default, the pipeline definitions that are currently in the system +are used. However, you can supply substitute pipeline definitions in the +body of the request. These will be used in place of the pipeline +definitions that are already in the system. This can be used to replace +existing pipeline definitions or to create new ones. The pipeline +substitutions are only used within this request. + +[[simulate-ingest-api-path-params]] +==== {api-path-parms-title} + +`<target>`:: +(Optional, string) +The index to simulate ingesting into. This can be overridden by specifying an index +on each document. If you provide a `<target>` in the request path, it is used for any +documents that don’t explicitly specify an index argument. + +[[simulate-ingest-api-query-params]] +==== {api-query-parms-title} + +`pipeline`:: +(Optional, string) +Pipeline to use as the default pipeline. This can be used to override the default pipeline +of the index being ingested into.
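+ +For example, the following request (which assumes that a pipeline named `my-other-pipeline` already exists) simulates ingestion into `my-index` with `my-other-pipeline` used in place of the index's default pipeline: + +[source,console] +---- +POST /_ingest/_simulate?pipeline=my-other-pipeline +{ + "docs": [ + { + "_index": "my-index", + "_source": { + "foo": "bar" + } + } + ] +} +---- +// TEST[skip:assumes my-other-pipeline exists]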
+ + +[role="child_attributes"] +[[simulate-ingest-api-request-body]] +==== {api-request-body-title} + +`docs`:: +(Required, array of objects) +Sample documents to test in the pipeline. ++ +.Properties of `docs` objects +[%collapsible%open] +==== +`_id`:: +(Optional, string) +Unique identifier for the document. + +`_index`:: +(Optional, string) +Name of the index that the document will be ingested into. + +`_source`:: +(Required, object) +JSON body for the document. +==== + +`pipeline_substitutions`:: +(Optional, map of strings to objects) +Map of pipeline IDs to substitute pipeline definition objects. ++ +.Properties of pipeline definition objects +[%collapsible%open] +==== +include::put-pipeline.asciidoc[tag=pipeline-object] +==== + +[[simulate-ingest-api-example]] +==== {api-examples-title} + + +[[simulate-ingest-api-pre-existing-pipelines-ex]] +===== Use pre-existing pipeline definitions +In this example, the index `my-index` has a default pipeline called `my-pipeline` and a final +pipeline called `my-final-pipeline`. Since both documents are being ingested into `my-index`, +both pipelines are executed using the pipeline definitions that are already in the system. + +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ] +} +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "docs": [ + { + "doc": { + "_id": "123", + "_index": "my-index", + "_version": -3, + "_source": { + "field1": "value1", + "field2": "value2", + "foo": "bar" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + }, + { + "doc": { + "_id": "456", + "_index": "my-index", + "_version": -3, + "_source": { + "field1": "value1", + "field2": "value2", + "foo": "rab" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + } + ] +} +---- + +[[simulate-ingest-api-request-body-ex]] +===== Specify a pipeline substitution in the request body +In this example, the index `my-index` has a default pipeline called `my-pipeline` and a final +pipeline called `my-final-pipeline`. However, a substitute definition of `my-pipeline` is +provided in `pipeline_substitutions`. The substitute `my-pipeline` will be used in place of +the `my-pipeline` that is in the system, and then the `my-final-pipeline` that is already +defined in the system will be executed.
+ +[source,console] +---- +POST /_ingest/_simulate +{ + "docs": [ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + { + "uppercase": { + "field": "foo" + } + } + ] + } + } +} +---- + +The API returns the following response: + +[source,console-result] +---- +{ + "docs": [ + { + "doc": { + "_id": "123", + "_index": "my-index", + "_version": -3, + "_source": { + "field2": "value2", + "foo": "BAR" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + }, + { + "doc": { + "_id": "456", + "_index": "my-index", + "_version": -3, + "_source": { + "field2": "value2", + "foo": "RAB" + }, + "executed_pipelines": [ + "my-pipeline", + "my-final-pipeline" + ] + } + } + ] +} +---- + +//// +[source,console] +---- +DELETE /my-index + +DELETE /_ingest/pipeline/* +---- + +[source,console-result] +---- +{ + "acknowledged": true +} +---- +//// diff --git a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc index 6ba601e55ebe0..38c695c0b0667 100644 --- a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc @@ -72,8 +72,9 @@ enrich index for the policy. [source,console] ---- -POST /_enrich/policy/postal_policy/_execute +POST /_enrich/policy/postal_policy/_execute?wait_for_completion=false ---- +// TEST[s/\?wait_for_completion=false//] // TEST[continued] Use the <> to create an ingest diff --git a/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc index 306e69577c426..ed75d15df853e 100644 --- a/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc @@ -58,8 +58,9 @@ enrich index for the policy. [source,console] ---- -POST /_enrich/policy/users-policy/_execute +POST /_enrich/policy/users-policy/_execute?wait_for_completion=false ---- +// TEST[s/\?wait_for_completion=false//] // TEST[continued] diff --git a/docs/reference/ingest/processors/dissect.asciidoc b/docs/reference/ingest/processors/dissect.asciidoc index 9d408ea150644..8f25bd8c8b90e 100644 --- a/docs/reference/ingest/processors/dissect.asciidoc +++ b/docs/reference/ingest/processors/dissect.asciidoc @@ -122,6 +122,7 @@ Use the right padding modifier to allow for repetition of the characters after a The right padding modifier may be placed on any key with any other modifiers. It should always be the furthest right modifier. For example: `%{+keyname/1->}` and `%{->}` +// end::dissect-modifier-skip-right-padding[] Right padding modifier example |====== @@ -132,7 +133,9 @@ Right padding modifier example * level = WARN |====== +// tag::dissect-modifier-empty-right-padding[] The right padding modifier may be used with an empty key to help skip unwanted data. For example, the same input string, but wrapped with brackets requires the use of an empty right padded key to achieve the same result. 
+// end::dissect-modifier-empty-right-padding[] Right padding modifier with empty key example |====== @@ -142,7 +145,6 @@ Right padding modifier with empty key example * ts = 1998-08-10T17:15:42,466 * level = WARN |====== -// end::dissect-modifier-skip-right-padding[] [[append-modifier]] ===== Append modifier (`+`) @@ -151,6 +153,7 @@ Right padding modifier with empty key example Dissect supports appending two or more results together for the output. Values are appended left to right. An append separator can be specified. In this example the append_separator is defined as a space. +// end::append-modifier[] Append modifier example |====== @@ -159,7 +162,7 @@ Append modifier example | *Result* a| * name = john jacob jingleheimer schmidt |====== -// end::append-modifier[] + [[append-order-modifier]] ===== Append with order modifier (`+` and `/n`) @@ -168,6 +171,7 @@ Append modifier example Dissect supports appending two or more results together for the output. Values are appended based on the order defined (`/n`). An append separator can be specified. In this example the append_separator is defined as a comma. +// end::append-order-modifier[] Append with order modifier example |====== @@ -176,7 +180,6 @@ Append with order modifier example | *Result* a| * name = schmidt,john,jingleheimer,jacob |====== -// end::append-order-modifier[] [[named-skip-key]] ===== Named skip key (`?`) @@ -184,6 +187,7 @@ Append with order modifier example // tag::named-skip-key[] Dissect supports ignoring matches in the final result. This can be done with an empty key `%{}`, but for readability it may be desired to give that empty key a name. +// end::named-skip-key[] Named skip key modifier example |====== @@ -193,7 +197,6 @@ Named skip key modifier example * clientip = 1.2.3.4 * @timestamp = 30/Apr/1998:22:00:52 +0000 |====== -// end::named-skip-key[] [[reference-keys]] ===== Reference keys (`*` and `&`) diff --git a/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc index 390360a640ea3..f11a95a6c5fe4 100644 --- a/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc @@ -70,8 +70,9 @@ enrich index for the policy. [source,console] ---- -POST /_enrich/policy/networks-policy/_execute +POST /_enrich/policy/networks-policy/_execute?wait_for_completion=false ---- +// TEST[s/\?wait_for_completion=false//] // TEST[continued] diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc index fad11b28858b7..48505ab314c1e 100644 --- a/docs/reference/ingest/search-inference-processing.asciidoc +++ b/docs/reference/ingest/search-inference-processing.asciidoc @@ -54,7 +54,7 @@ A common use case is a user searching FAQs, or a support agent searching a knowl The diagram below shows how documents are processed during ingestion. // Original diagram: https://whimsical.com/ml-in-enterprise-search-ErCetPqrcCPu2QYHvAwrgP@2bsEvpTYSt1Hiuq6UBf68tUWvFiXdzLt6ao -image::../images/ingest/document-enrichment-diagram.png["ML inference pipeline diagram"] +image::images/ingest/document-enrichment-diagram.png["ML inference pipeline diagram"] * Documents are processed by the `my-index-0001` pipeline, which happens automatically when indexing through an Elastic connector or crawler. * The `_run_ml_inference` field is set to `true` to ensure the ML inference pipeline (`my-index-0001@ml-inference`) is executed.
@@ -95,7 +95,7 @@ Once your index-specific ML inference pipeline is ready, you can add inference p To add an inference processor to the ML inference pipeline, click the *Add Inference Pipeline* button in the *Machine Learning Inference Pipelines* card. [role="screenshot"] -image::../images/ingest/document-enrichment-add-inference-pipeline.png["Add Inference Pipeline"] +image::images/ingest/document-enrichment-add-inference-pipeline.png["Add Inference Pipeline"] Here, you'll be able to: diff --git a/docs/reference/ingest/search-ingest-pipelines.asciidoc b/docs/reference/ingest/search-ingest-pipelines.asciidoc index 049a74670581d..f37e07f632810 100644 --- a/docs/reference/ingest/search-ingest-pipelines.asciidoc +++ b/docs/reference/ingest/search-ingest-pipelines.asciidoc @@ -22,7 +22,7 @@ To find this tab in the Kibana UI: The tab is highlighted in this screenshot: [.screenshot] -image::../images/ingest/ingest-pipeline-ent-search-ui.png[align="center"] +image::images/ingest/ingest-pipeline-ent-search-ui.png[align="center"] [discrete#ingest-pipeline-search-in-enterprise-search] === Overview diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index 3ea2c96eeaf02..3fc23b44994a7 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -24,6 +24,7 @@ to handle data in a wide variety of use cases: * Store and analyze logs, metrics, and security event data * Use machine learning to automatically model the behavior of your data in real time +* Use {es} as a vector database to create, store, and search vector embeddings * Automate business workflows using {es} as a storage engine * Manage, integrate, and analyze spatial information using {es} as a geographic information system (GIS) diff --git a/docs/reference/landing-page.asciidoc b/docs/reference/landing-page.asciidoc index a53a5770fe030..6d6c257f0c594 100644 --- a/docs/reference/landing-page.asciidoc +++ b/docs/reference/landing-page.asciidoc @@ -62,7 +62,7 @@ Elasticsearch is the search and analytics engine that powers the Elastic Stack.
[HTML markup omitted: the landing page diff adds "Search Labs" and "Notebook examples" entries to the resources list, after the existing "Plugins and integrations" entry]
diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 05e23d901d5d3..478a70e23b93f 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -26,8 +26,9 @@ Instantiates a {dfeed}. [[ml-put-datafeed-desc]] == {api-description-title} -{ml-docs}/ml-dfeeds.html[{dfeeds-cap}] retrieve data from {es} for analysis by -an {anomaly-job}. You can associate only one {dfeed} to each {anomaly-job}. +{ml-docs}/ml-ad-run-jobs.html#ml-ad-datafeeds[{dfeeds-cap}] retrieve data from +{es} for analysis by an {anomaly-job}. You can associate only one {dfeed} with +each {anomaly-job}. The {dfeed} contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc index c16098910bbe3..bf98327807e70 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc @@ -146,6 +146,11 @@ When an alert occurs, it is always the same name as the job ID of the associated them from generating actions. For more details, refer to {kibana-ref}/create-and-manage-rules.html#controlling-rules[Snooze and disable rules]. +You can also review how the alerts that occurred correlate with the +{anomaly-detect} results in the **Anomaly explorer** by using the +**Anomaly timeline** swimlane and the **Alerts** panel. + + [[creating-anomaly-jobs-health-rules]] == {anomaly-jobs-cap} health rules diff --git a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc index 7da46e13a8ce4..45517b99c2177 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc @@ -443,121 +443,8 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer)
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== +Refer to <> to review the properties of the +`tokenization` object. 
===== `ner`::: @@ -582,121 +469,8 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== +Refer to <> to review the +properties of the `tokenization` object. ===== `pass_through`::: @@ -714,738 +488,121 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +Refer to <> to review the properties of the +`tokenization` object. +===== -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: +`question_answering`::: (Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-question-answering] + -.Properties of roberta +.Properties of question_answering inference [%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: +===== +`max_answer_length`:::: (Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] +The maximum number of words in the answer. Defaults to `15`.
-`truncate`:::: +`results_field`:::: (Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: +`tokenization`:::: (Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +It is recommended to set `max_sequence_length` to `386`, `span` to `128`, and +`truncate` to `none`. ++ +Refer to <> to review the properties of the +`tokenization` object. +===== -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: +`regression`::: (Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] +Regression configuration for inference. + -.Properties of xlm_roberta +.Properties of regression inference [%collapsible%open] -======= -`max_sequence_length`:::: +===== +`num_top_feature_importance_values`:::: (Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-regression-num-top-feature-importance-values]
- -`results_field`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -Recommended to set `max_sentence_length` to `386` with `128` of `span` and set -`truncate` to `none`. -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== -===== - -`regression`::: -(Optional, object) -Regression configuration for inference. -+ -.Properties of regression inference -[%collapsible%open] -===== -`num_top_feature_importance_values`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-regression-num-top-feature-importance-values] - -`results_field`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] -===== - -`text_classification`::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-classification] -+ -.Properties of text_classification inference -[%collapsible%open] -===== -`classification_labels`:::: -(Optional, string) An array of classification labels. - -`num_top_classes`:::: -(Optional, integer) -Specifies the number of top class predictions to return. Defaults to all classes (-1). 
- -`results_field`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: 
-(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== -===== -`text_embedding`::: -(Object, optional) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding] -+ -.Properties of text_embedding inference -[%collapsible%open] -===== -`embedding_size`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding-size] - -`results_field`:::: +`results_field`:::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] -+ -.Properties of xlm_roberta -[%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] -+ -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== ===== -`text_similarity`:::: -(Object, optional) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity] -+ -.Properties of text_similarity inference -[%collapsible%open] -===== -`span_score_combination_function`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-span-score-func] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: -(Optional, object) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`add_prefix_space`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] - -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] - -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] - -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] - -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: +`text_classification`::: (Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-classification] + -.Properties of xlm_roberta +.Properties of text_classification inference [%collapsible%open] -======= -`max_sequence_length`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] +===== +`classification_labels`:::: +(Optional, string) An array of classification labels. -`span`:::: +`num_top_classes`:::: (Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] +Specifies the number of top class predictions to return. Defaults to all classes +(-1). -`truncate`:::: +`results_field`:::: (Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: +`tokenization`:::: (Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of bert_ja -[%collapsible%open] -======= -`do_lower_case`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] +Refer to <> to review the properties of the +`tokenization` object. 
+===== -`max_sequence_length`:::: +`text_embedding`::: +(Object, optional) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding] ++ +.Properties of text_embedding inference +[%collapsible%open] +===== +`embedding_size`:::: (Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding-size] -`span`:::: -(Optional, integer) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] +`results_field`:::: +(Optional, string) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] + +`tokenization`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] ++ +Refer to <> to review the properties of the +`tokenization` object. +===== -`truncate`:::: +`text_similarity`:::: +(Object, optional) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity] ++ +.Properties of text_similarity inference +[%collapsible%open] +===== +`span_score_combination_function`:::: (Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-span-score-func] -`with_special_tokens`:::: -(Optional, boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== +`tokenization`:::: +(Optional, object) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] ++ +Refer to <> to review the properties of the +`tokenization` object. ===== + `zero_shot_classification`::: (Object, optional) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-zero-shot-classification] @@ -1477,190 +634,242 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] + -.Properties of tokenization +Refer to <> to review the properties of the +`tokenization` object. +===== +==== +//End of inference_config + +//Begin input +`input`:: +(Required, object) +The input field names for the model definition. ++ +.Properties of `input` [%collapsible%open] -====== -`bert`:::: +==== +`field_names`::: +(Required, string) +An array of input field names for the model. +==== +//End input + +// Begin location +`location`:: +(Optional, object) +The model definition location. If the `definition` or `compressed_definition` +are not specified, the `location` is required. ++ +.Properties of `location` +[%collapsible%open] +==== +`index`::: +(Required, object) +Indicates that the model definition is stored in an index. This object must be +empty as the index for storing model definitions is configured automatically. +==== +// End location + +`metadata`:: +(Optional, object) +An object map that contains metadata about the model. + +`model_size_bytes`:: +(Optional, integer) +The estimated memory usage in bytes to keep the trained model in memory. This +property is supported only if `defer_definition_decompression` is `true` or the +model definition is not supplied. + +`model_type`:: +(Optional, string) +The created model type. By default the model type is `tree_ensemble`. +Appropriate types are: ++ +-- +* `tree_ensemble`: The model definition is an ensemble model of decision trees. 
+* `lang_ident`: A special type reserved for language identification models.
+* `pytorch`: The stored definition is a PyTorch (specifically a TorchScript) model. Currently only
+NLP models are supported. For more information, refer to {ml-docs}/ml-nlp.html[{nlp-cap}].
+--
+`platform_architecture`::
+(Optional, string)
+If the model only works on one platform, because it is heavily
+optimized for a particular processor architecture and OS combination,
+then this field specifies which platform. The format of the string must match
+the platform identifiers used by Elasticsearch, so it must be one of `linux-x86_64`,
+`linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`.
+For portable models (those that work independent of processor architecture or
+OS features), leave this field unset.
+
+//Begin prefix_strings
+`prefix_strings`::
+(Optional, object)
+Certain NLP models are trained in such a way that a prefix string should
+be applied to the input text before the input is evaluated. The prefix
+may differ depending on the intended use. For asymmetric tasks such
+as information retrieval, the prefix applied to a passage as it is indexed
+can be different to the prefix applied when searching those passages.
+
+`prefix_strings` has two options: a prefix string that is always applied
+in the search context and one that is always applied when ingesting the
+docs. Both are optional. An illustrative example appears below, after the
+parameter descriptions.
++
+.Properties of `prefix_strings`
+[%collapsible%open]
+====
+`search`:::
+(Optional, string)
+The prefix string to prepend to the input text for requests
+originating from a search query.
+
+`ingest`:::
+(Optional, string)
+The prefix string to prepend to the input text for requests
+at ingest, where the {infer} ingest processor is used.
+====
+//End prefix_strings
+
+`tags`::
+(Optional, string)
+An array of tags to organize the model.
+
+
+[[tokenization-properties]]
+=== Properties of `tokenization`
+
+The `tokenization` object has the following properties.
+ +`bert`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] + .Properties of bert [%collapsible%open] -======= -`do_lower_case`:::: +==== +`do_lower_case`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] -`max_sequence_length`:::: +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens] -======= -`roberta`:::: +==== +`roberta`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] + .Properties of roberta [%collapsible%open] -======= -`add_prefix_space`:::: +==== +`add_prefix_space`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-add-prefix-space] -`max_sequence_length`:::: +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`mpnet`:::: +==== +`mpnet`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] + .Properties of mpnet [%collapsible%open] -======= -`do_lower_case`:::: +==== +`do_lower_case`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] -`max_sequence_length`:::: +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet-with-special-tokens] -======= -`xlm_roberta`:::: +==== +`xlm_roberta`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta] + .Properties of xlm_roberta [%collapsible%open] -======= -`max_sequence_length`:::: +==== +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) 
include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta-with-special-tokens] -======= -`bert_ja`:::: +==== +`bert_ja`:: (Optional, object) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja] + .Properties of bert_ja [%collapsible%open] -======= -`do_lower_case`:::: +==== +`do_lower_case`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-do-lower-case] -`max_sequence_length`:::: +`max_sequence_length`::: (Optional, integer) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-max-sequence-length] -`truncate`:::: +`span`::: +(Optional, integer) +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span] + +`truncate`::: (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -`with_special_tokens`:::: +`with_special_tokens`::: (Optional, boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens] -======= -====== -===== -==== -//End of inference_config - -//Begin input -`input`:: -(Required, object) -The input field names for the model definition. -+ -.Properties of `input` -[%collapsible%open] -==== -`field_names`::: -(Required, string) -An array of input field names for the model. -==== -//End input - -// Begin location -`location`:: -(Optional, object) -The model definition location. If the `definition` or `compressed_definition` -are not specified, the `location` is required. -+ -.Properties of `location` -[%collapsible%open] -==== -`index`::: -(Required, object) -Indicates that the model definition is stored in an index. This object must be -empty as the index for storing model definitions is configured automatically. ==== -// End location - -`metadata`:: -(Optional, object) -An object map that contains metadata about the model. - -`model_size_bytes`:: -(Optional, integer) -The estimated memory usage in bytes to keep the trained model in memory. This -property is supported only if `defer_definition_decompression` is `true` or the -model definition is not supplied. - -`model_type`:: -(Optional, string) -The created model type. By default the model type is `tree_ensemble`. -Appropriate types are: -+ --- -* `tree_ensemble`: The model definition is an ensemble model of decision trees. -* `lang_ident`: A special type reserved for language identification models. -* `pytorch`: The stored definition is a PyTorch (specifically a TorchScript) model. Currently only -NLP models are supported. For more information, refer to {ml-docs}/ml-nlp.html[{nlp-cap}]. --- -`platform_architecture`:: -(Optional, string) -If the model only works on one platform, because it is heavily -optimized for a particular processor architecture and OS combination, -then this field specifies which. The format of the string must match -the platform identifiers used by Elasticsearch, so one of, `linux-x86_64`, -`linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. -For portable models (those that work independent of processor architecture or -OS features), leave this field unset. - - -`tags`:: -(Optional, string) -An array of tags to organize the model. 
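+The following sketch shows how `prefix_strings` might be supplied when
+creating a model. The model ID and prefix values here are illustrative only,
+not a shipped model:
+
+[source,console]
+----
+PUT _ml/trained_models/my-e5-like-model
+{
+  "model_type": "pytorch",
+  "input": {
+    "field_names": [ "text_field" ]
+  },
+  "prefix_strings": {
+    "search": "query: ",
+    "ingest": "passage: "
+  }
+}
+----
+// TEST[skip:illustrative example only]
+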
[[ml-put-trained-models-example]]
diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc
index 1f7b83294651f..5a7aa43155c66 100644
--- a/docs/reference/modules/cluster/shards_allocation.asciidoc
+++ b/docs/reference/modules/cluster/shards_allocation.asciidoc
@@ -183,6 +183,13 @@ The minimum improvement in weight which triggers a rebalancing shard movement.
Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing
shards sooner, leaving the cluster in a more unbalanced state.

-NOTE: Regardless of the result of the balancing algorithm, rebalancing might
+[NOTE]
+====
+* It is not recommended to adjust the values of the heuristics settings. The
+default values are generally good, and although different values may improve
+the current balance, they may create problems in the future
+if the cluster or workload changes.
+* Regardless of the result of the balancing algorithm, rebalancing might
not be allowed due to allocation rules such as forced awareness and allocation
filtering.
+====
diff --git a/docs/reference/modules/discovery/quorums.asciidoc b/docs/reference/modules/discovery/quorums.asciidoc
index 6f6e978891096..f6f50b88b3190 100644
--- a/docs/reference/modules/discovery/quorums.asciidoc
+++ b/docs/reference/modules/discovery/quorums.asciidoc
@@ -15,7 +15,7 @@ those of the other piece.

Elasticsearch allows you to add and remove master-eligible nodes to a running
cluster. In many cases you can do this simply by starting or stopping the nodes
-as required. See <>.
+as required. See <> for more information.

As nodes are added or removed Elasticsearch maintains an optimal level of
fault tolerance by updating the cluster's <> for more information.
+====
+// end::quorums-and-availability[]
+
+After a master-eligible node has joined or left the cluster, the elected master
+may issue a cluster-state update that adjusts the voting configuration to match,
+and this can take a short time to complete. It is important to wait for this
+adjustment to complete before removing more nodes from the cluster. See
+<> for more information.

[discrete]
==== Master elections
diff --git a/docs/reference/modules/discovery/voting.asciidoc b/docs/reference/modules/discovery/voting.asciidoc
index b249f9f38bfd4..04cae9d02ab66 100644
--- a/docs/reference/modules/discovery/voting.asciidoc
+++ b/docs/reference/modules/discovery/voting.asciidoc
@@ -11,12 +11,7 @@ Usually the voting configuration is the same as the set of all the
master-eligible nodes that are currently in the cluster. However, there are
some situations in which they may be different.

-IMPORTANT: To ensure the cluster remains available, you **must not stop half or
-more of the nodes in the voting configuration at the same time**. As long as more
-than half of the voting nodes are available, the cluster can work normally. For
-example, if there are three or four master-eligible nodes, the cluster
-can tolerate one unavailable node. If there are two or fewer master-eligible
-nodes, they must all remain available.
+include::quorums.asciidoc[tag=quorums-and-availability] After a node joins or leaves the cluster, {es} reacts by automatically making corresponding changes to the voting configuration in order to ensure that the diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index d15fd40846529..e924cc05376d9 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -78,29 +78,7 @@ GET my-index/_search ---- // TEST[skip: TBD] -[discrete] -[[optimizing-text-expansion]] -=== Optimizing the search performance of the text_expansion query - -https://www.elastic.co/blog/faster-retrieval-of-top-hits-in-elasticsearch-with-block-max-wand[Max WAND] -is an optimization technique used by {es} to skip documents that cannot score -competitively against the current best matching documents. However, the tokens -generated by the ELSER model don't work well with the Max WAND optimization. -Consequently, enabling Max WAND can actually increase query latency for -`text_expansion`. For datasets of a significant size, disabling Max -WAND leads to lower query latencies. - -Max WAND is controlled by the -<> query parameter. Setting track_total_hits -to true forces {es} to consider all documents, resulting in lower query -latencies for the `text_expansion` query. However, other {es} queries run slower -when Max WAND is disabled. - -If you are combining the `text_expansion` query with standard text queries in a -compound search, it is recommended to measure the query performance before -deciding which setting to use. - -NOTE: The `track_total_hits` option applies to all queries in the search request -and may be optimal for some queries but not for others. Take into account the -characteristics of all your queries to determine the most suitable -configuration. +[NOTE] +==== +Depending on your data, the text expansion query may be faster with `track_total_hits: false`. +==== diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index f065c2deeae72..e0568f500f268 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1720,11 +1720,6 @@ See <>. See <>. -[role="exclude",id="getting-started"] -=== Quick start - -See {estc-welcome}/getting-started-general-purpose.html[Set up a general purpose Elastic deployment]. - [role="exclude",id="getting-started-index"] === Index some documents diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 65a5c741a83c5..011c44216cc0c 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -7,6 +7,7 @@ This section summarizes the changes in each release. * <> +* <> * <> * <> * <> @@ -55,6 +56,7 @@ This section summarizes the changes in each release. -- include::release-notes/8.12.0.asciidoc[] +include::release-notes/8.11.1.asciidoc[] include::release-notes/8.11.0.asciidoc[] include::release-notes/8.10.4.asciidoc[] include::release-notes/8.10.3.asciidoc[] diff --git a/docs/reference/release-notes/8.10.0.asciidoc b/docs/reference/release-notes/8.10.0.asciidoc index 9fbe7a2b1d099..34d1d26e5d69a 100644 --- a/docs/reference/release-notes/8.10.0.asciidoc +++ b/docs/reference/release-notes/8.10.0.asciidoc @@ -35,6 +35,8 @@ delete all the snapshots in the repository taken with version 8.10.0 or later using a cluster running version 8.10.4. 
// end::repositorydata-format-change[]

+include::8.7.1.asciidoc[tag=no-preventive-gc-issue]
+
[[breaking-8.10.0]]
[float]
=== Breaking changes
diff --git a/docs/reference/release-notes/8.10.1.asciidoc b/docs/reference/release-notes/8.10.1.asciidoc
index d049d5b33b1f7..0cb00699eeac7 100644
--- a/docs/reference/release-notes/8.10.1.asciidoc
+++ b/docs/reference/release-notes/8.10.1.asciidoc
@@ -9,6 +9,8 @@ Also see <>.

include::8.10.0.asciidoc[tag=repositorydata-format-change]

+include::8.7.1.asciidoc[tag=no-preventive-gc-issue]
+
[[bug-8.10.1]]
[float]
=== Bug fixes
diff --git a/docs/reference/release-notes/8.10.2.asciidoc b/docs/reference/release-notes/8.10.2.asciidoc
index c428b4534fe79..911a410104a26 100644
--- a/docs/reference/release-notes/8.10.2.asciidoc
+++ b/docs/reference/release-notes/8.10.2.asciidoc
@@ -7,4 +7,6 @@ include::8.10.0.asciidoc[tag=repositorydata-format-change]

+include::8.7.1.asciidoc[tag=no-preventive-gc-issue]
+
Also see <>.
diff --git a/docs/reference/release-notes/8.10.3.asciidoc b/docs/reference/release-notes/8.10.3.asciidoc
index b7828f52ad082..119930058a42e 100644
--- a/docs/reference/release-notes/8.10.3.asciidoc
+++ b/docs/reference/release-notes/8.10.3.asciidoc
@@ -7,6 +7,19 @@ include::8.10.0.asciidoc[tag=repositorydata-format-change]

+// tag::no-preventive-gc-issue[]
+* High Memory Pressure due to a GC change in JDK 21
++
+This version of Elasticsearch is bundled with JDK 21. In JDK 21
+https://bugs.openjdk.org/browse/JDK-8297639[Preventive GC has been removed].
+This may lead to increased memory pressure and an increased number of CircuitBreakerExceptions when retrieving large
+documents under some load patterns. (issue: {es-issue}99592[#99592])
++
+If you needed to explicitly <>, we recommend you avoid upgrading to this version, as the settings that enable Preventive GC have been removed
+from JDK 21.
+// end::no-preventive-gc-issue[]
+
Also see <>.

[[bug-8.10.3]]
diff --git a/docs/reference/release-notes/8.10.4.asciidoc b/docs/reference/release-notes/8.10.4.asciidoc
index f2e95af71afcb..6c49bae1e2150 100644
--- a/docs/reference/release-notes/8.10.4.asciidoc
+++ b/docs/reference/release-notes/8.10.4.asciidoc
@@ -25,6 +25,8 @@ first. If you cannot repair the repository in this way, first delete all the
snapshots in the repository taken with version 8.10.0 or later using a cluster
running version 8.10.4.

+include::8.10.3.asciidoc[tag=no-preventive-gc-issue]
+
Also see <>.

[[bug-8.10.4]]
diff --git a/docs/reference/release-notes/8.11.0.asciidoc b/docs/reference/release-notes/8.11.0.asciidoc
index 08ddaf5667845..acb27dc180727 100644
--- a/docs/reference/release-notes/8.11.0.asciidoc
+++ b/docs/reference/release-notes/8.11.0.asciidoc
@@ -10,6 +10,11 @@ Also see <>.
Infra/Core:: * Remove `transport_versions` from cluster state API {es-pull}99223[#99223] +[[known-issues-8.11.0]] +[float] +=== Known issues +include::8.10.3.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.11.0]] [float] === Bug fixes @@ -297,8 +302,8 @@ Transform:: * Add accessors required to recreate `TransformStats` object from the fields {es-pull}98844[#98844] Vector Search:: -* Add new max_inner_product vector similarity function {es-pull}99445[#99445] -* Adds `nested` support for indexed `dense_vector` fields {es-pull}99532[#99532] +* Add new max_inner_product vector similarity function {es-pull}99527[#99527] +* Adds `nested` support for indexed `dense_vector` fields {es-pull}99763[#99763] * Dense vector field types are indexed by default {es-pull}98268[#98268] * Increase the max vector dims to 4096 {es-pull}99682[#99682] diff --git a/docs/reference/release-notes/8.11.1.asciidoc b/docs/reference/release-notes/8.11.1.asciidoc new file mode 100644 index 0000000000000..b1dbc4a95c963 --- /dev/null +++ b/docs/reference/release-notes/8.11.1.asciidoc @@ -0,0 +1,43 @@ +[[release-notes-8.11.1]] +== {es} version 8.11.1 + +Also see <>. + +[[known-issues-8.11.1]] +[float] +=== Known issues +include::8.10.3.asciidoc[tag=no-preventive-gc-issue] + +[[bug-8.11.1]] +[float] +=== Bug fixes + +Allocation:: +* Avoid negative `DesiredBalanceStats#lastConvergedIndex` {es-pull}101998[#101998] + +Authentication:: +* Fix memory leak from JWT cache (and fix the usage of the JWT auth cache) {es-pull}101799[#101799] + +Machine Learning:: +* Fix inference timeout from the Inference Ingest Processor {es-pull}101971[#101971] + +Mapping:: +* Fix incorrect dynamic mapping for non-numeric-value arrays #101965 {es-pull}101967[#101967] + +Network:: +* Fail listener on exception in `TcpTransport#openConnection` {es-pull}101907[#101907] (issue: {es-issue}100510[#100510]) + +Search:: +* Dry up `AsyncTaskIndexService` memory management and fix inefficient circuit breaker use {es-pull}101892[#101892] + +Snapshot/Restore:: +* Respect regional AWS STS endpoints {es-pull}101705[#101705] (issue: {es-issue}89175[#89175]) + +[[enhancement-8.11.1]] +[float] +=== Enhancements + +Machine Learning:: +* Add inference counts by model to the machine learning usage stats {es-pull}101915[#101915] + + diff --git a/docs/reference/release-notes/8.7.1.asciidoc b/docs/reference/release-notes/8.7.1.asciidoc index a0513bc1a8f0e..70f5e4add88ca 100644 --- a/docs/reference/release-notes/8.7.1.asciidoc +++ b/docs/reference/release-notes/8.7.1.asciidoc @@ -18,6 +18,23 @@ This issue is fixed in 8.8.0. include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] +// tag::no-preventive-gc-issue[] +* High Memory Pressure due to a GC JVM setting change ++ +This version of Elasticsearch is bundled with JDK 20. In JDK 20 +https://bugs.openjdk.org/browse/JDK-8293861[Preventive GC is disabled by default]. +This may lead to increased memory pressure and an increased number of CircuitBreakerExceptions when retrieving large +documents under some load patterns. (issue: {es-issue}99592[#99592]) ++ +If this change affects your use of Elasticsearch, consider re-enabling the previous behaviour +by adding the JVM arguments `-XX:+UnlockDiagnosticVMOptions -XX:+G1UsePreventiveGC` (reference: +https://www.oracle.com/java/technologies/javase/20-relnote-issues.html#JDK-8293861[JDK 20 release notes]). It is +important to note that this workaround is temporary and works only with JDK 20, which is bundled with Elasticsearch up +to version 8.10.2 inclusive. 
Successive versions are bundling JDK 21+, where this setting +https://bugs.openjdk.org/browse/JDK-8297639[has been removed]. Specifying those JVM arguments will prevent the +JVM (and therefore Elasticsearch Nodes) from starting. +// end::no-preventive-gc-issue[] + [[bug-8.7.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.8.2.asciidoc b/docs/reference/release-notes/8.8.2.asciidoc index d7e6b9b1fcc76..8a24ae2e8d4ef 100644 --- a/docs/reference/release-notes/8.8.2.asciidoc +++ b/docs/reference/release-notes/8.8.2.asciidoc @@ -3,6 +3,11 @@ Also see <>. +[[known-issues-8.8.2]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.8.2]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.9.0.asciidoc b/docs/reference/release-notes/8.9.0.asciidoc index 2b7b143c268dc..c49eac9f0327c 100644 --- a/docs/reference/release-notes/8.9.0.asciidoc +++ b/docs/reference/release-notes/8.9.0.asciidoc @@ -12,6 +12,8 @@ task is longer than the model's max_sequence_length and truncate is set to none then inference fails with the message `question answering result has invalid dimension`. (issue: {es-issue}97917[#97917]) +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[breaking-8.9.0]] [float] === Breaking changes diff --git a/docs/reference/release-notes/8.9.1.asciidoc b/docs/reference/release-notes/8.9.1.asciidoc index 18c226538c4b9..680860622c1bb 100644 --- a/docs/reference/release-notes/8.9.1.asciidoc +++ b/docs/reference/release-notes/8.9.1.asciidoc @@ -3,6 +3,11 @@ Also see <>. +[[known-issues-8.9.1]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [[bug-8.9.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.9.2.asciidoc b/docs/reference/release-notes/8.9.2.asciidoc index 6b00405261daf..8464d21e1ccc4 100644 --- a/docs/reference/release-notes/8.9.2.asciidoc +++ b/docs/reference/release-notes/8.9.2.asciidoc @@ -3,6 +3,11 @@ Also see <>. +[[known-issues-8.9.2]] +[float] +=== Known issues +include::8.7.1.asciidoc[tag=no-preventive-gc-issue] + [float] [[security-updates-8.9.2]] === Security updates diff --git a/docs/reference/rest-api/security/get-api-keys.asciidoc b/docs/reference/rest-api/security/get-api-keys.asciidoc index ddbe0612ec987..d75edda9296a5 100644 --- a/docs/reference/rest-api/security/get-api-keys.asciidoc +++ b/docs/reference/rest-api/security/get-api-keys.asciidoc @@ -175,7 +175,7 @@ A successful call returns a JSON structure that contains the information of the <4> Creation time for the API key in milliseconds <5> Optional expiration time for the API key in milliseconds <6> Invalidation status for the API key. If the key has been invalidated, it has -a value of `true`. Otherwise, it is `false`. +a value of `true` and an additional field with the `invalidation` time in milliseconds. Otherwise, it is `false`. <7> Principal for which this API key was created <8> Realm name of the principal for which this API key was created <9> Metadata of the API key diff --git a/docs/reference/rest-api/security/grant-api-keys.asciidoc b/docs/reference/rest-api/security/grant-api-keys.asciidoc index ad16f602d32c2..8feb6c3cd5f52 100644 --- a/docs/reference/rest-api/security/grant-api-keys.asciidoc +++ b/docs/reference/rest-api/security/grant-api-keys.asciidoc @@ -15,7 +15,7 @@ Creates an API key on behalf of another user. [[security-api-grant-api-key-prereqs]] ==== {api-prereq-title} -* To use this API, you must have the `grant_api_key` cluster privilege. 
+* To use this API, you must have the `grant_api_key` or the `manage_api_key` cluster privilege.

[[security-api-grant-api-key-desc]]
==== {api-description-title}
@@ -23,10 +23,13 @@ Creates an API key on behalf of another user.
This API is similar to <>, however it creates the API key for a user that is
different than the user that runs the API.

-The caller must have authentication credentials (either an access token,
-or a username and password) for the user on whose behalf the API key will be
-created. It is not possible to use this API to create an API key without that
-user's credentials.
+The caller must have authentication credentials for the user on whose behalf
+the API key will be created. It is not possible to use this API to create an
+API key without that user's credentials.
+The supported user authentication credential types are:
+ * username and password
+ * <>
+ * <>

The user, for whom the authentication credentials is provided, can optionally
<> (impersonate) another user.
@@ -55,8 +58,11 @@ The following parameters can be specified in the body of a POST request:

`access_token`::
(Required*, string)
-The user's access token. If you specify the `access_token` grant type, this
-parameter is required. It is not valid with other grant types.
+The user's <>, or JWT. Both <> and
+<> JWT token types are supported, and they depend on the underlying JWT realm configuration.
+The created API key will have a point-in-time snapshot of the permissions of the user authenticated with this token
+(or even more restricted permissions, see the `role_descriptors` parameter).
+If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types.

`api_key`::
(Required, object)
@@ -83,15 +89,32 @@ It supports nested data structure. Within the `metadata` object, keys beginning
with `_` are reserved for system usage.

+`client_authentication`::
+(Optional, object) When using the `access_token` grant type, and when supplying a
+JWT, this specifies the client authentication for <> that
+need it (i.e. what's normally specified by the `ES-Client-Authentication` request header).
+
+`scheme`:::
+(Required, string) The scheme (case-sensitive) as it's supplied in the
+`ES-Client-Authentication` request header. Currently, the only supported
+value is <>.
+
+`value`:::
+(Required, string) The value that follows the scheme for the client credentials
+as it's supplied in the `ES-Client-Authentication` request header. For example,
+if the client were to authenticate directly with a JWT using the request header
+`ES-Client-Authentication: SharedSecret myShar3dS3cret`, then `value` here should
+be `myShar3dS3cret`.
+
`grant_type`::
(Required, string)
The type of grant. Supported grant types are: `access_token`,`password`.

`access_token`:::
(Required*, string)
-In this type of grant, you must supply an access token that was created by the
-{es} token service. For more information, see
-<> and <>.
+In this type of grant, you must supply either an access token that was created by the
+{es} token service (see <> and <>),
+or a <> (either a JWT `access_token` or a JWT `id_token`).
`password`:::
In this type of grant, you must supply the user ID and password for which you
diff --git a/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc b/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc
index 6f2d234395e95..afadf394aa43c 100644
--- a/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc
+++ b/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc
@@ -96,31 +96,33 @@ requested set of cluster, index, and application privileges:

[source,console]
--------------------------------------------------
-POST /_security/user/_has_privileges
+POST /_security/profile/_has_privileges
{
  "uids": [
    "u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0",
    "u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1",
    "u_does-not-exist_0"
  ],
-  "cluster": [ "monitor", "create_snapshot", "manage_ml" ],
-  "index" : [
-    {
-      "names": [ "suppliers", "products" ],
-      "privileges": [ "create_doc"]
-    },
-    {
-      "names": [ "inventory" ],
-      "privileges" : [ "read", "write" ]
-    }
-  ],
-  "application": [
-    {
-      "application": "inventory_manager",
-      "privileges" : [ "read", "data:write/inventory" ],
-      "resources" : [ "product/1852563" ]
-    }
-  ]
+  "privileges": {
+    "cluster": [ "monitor", "create_snapshot", "manage_ml" ],
+    "index" : [
+      {
+        "names": [ "suppliers", "products" ],
+        "privileges": [ "create_doc"]
+      },
+      {
+        "names": [ "inventory" ],
+        "privileges" : [ "read", "write" ]
+      }
+    ],
+    "application": [
+      {
+        "application": "inventory_manager",
+        "privileges" : [ "read", "data:write/inventory" ],
+        "resources" : [ "product/1852563" ]
+      }
+    ]
+  }
}
--------------------------------------------------
// TEST[skip:TODO setup and tests will be possible once the profile uid is predictable]
diff --git a/docs/reference/rest-api/security/query-api-key.asciidoc b/docs/reference/rest-api/security/query-api-key.asciidoc
index f7b315d5db904..0e5973a010a47 100644
--- a/docs/reference/rest-api/security/query-api-key.asciidoc
+++ b/docs/reference/rest-api/security/query-api-key.asciidoc
@@ -77,6 +77,9 @@ Expiration time of the API key in milliseconds.
Indicates whether the API key is invalidated. If `true`, the key is invalidated.
Defaults to `false`.

+`invalidation`::
+Invalidation time of the API key in milliseconds. This field is only set for invalidated API keys.
+
`username`::
Username of the API key owner.

diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc
index c33d203f1415b..959a798378fc6 100644
--- a/docs/reference/rest-api/usage.asciidoc
+++ b/docs/reference/rest-api/usage.asciidoc
@@ -183,6 +183,7 @@ GET /_xpack/usage
            "avg": 0.0,
            "max": 0.0
          },
+          "stats_by_model": [],
          "model_sizes_bytes": {
            "total": 0.0,
            "min": 0.0,
diff --git a/docs/reference/search/point-in-time-api.asciidoc b/docs/reference/search/point-in-time-api.asciidoc
index 0403f9b04b2d1..2e32324cb44d9 100644
--- a/docs/reference/search/point-in-time-api.asciidoc
+++ b/docs/reference/search/point-in-time-api.asciidoc
@@ -22,6 +22,13 @@ or alias.
To search a <> for an alias, you
must have the `read` index privilege for the alias's data streams or indices.

+[[point-in-time-api-request-body]]
+==== {api-request-body-title}
+
+`index_filter`::
+(Optional, <>) Allows you to filter indices if the provided
+query rewrites to `match_none` on every shard.
+
[[point-in-time-api-example]]
==== {api-examples-title}

@@ -60,7 +67,7 @@ POST /_search <1>
or <> as these parameters
are copied from the point in time.
<2> Just like regular searches, you can <>, up to the first 10,000 hits. If you +`size` to page through search results>>, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with <>. <3> The `id` parameter tells Elasticsearch to execute the request using contexts from this point in time. diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index 4bf1ceabe08d8..c39719f1a3b61 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -76,12 +76,10 @@ to search one or more `dense_vector` fields with indexing enabled. requires the following mapping options: + -- -* An `index` value of `true`. - * A `similarity` value. This value determines the similarity metric used to score documents based on similarity between the query and document vector. For a list of available metrics, see the <> -parameter documentation. +parameter documentation. The `similarity` setting defaults to `cosine`. [source,console] ---- @@ -92,13 +90,11 @@ PUT image-index "image-vector": { "type": "dense_vector", "dims": 3, - "index": true, "similarity": "l2_norm" }, "title-vector": { "type": "dense_vector", "dims": 5, - "index": true, "similarity": "l2_norm" }, "title": { @@ -158,7 +154,7 @@ NOTE: Support for approximate kNN search was added in version 8.0. Before this, `dense_vector` fields did not support enabling `index` in the mapping. If you created an index prior to 8.0 containing `dense_vector` fields, then to support approximate kNN search the data must be reindexed using a new field -mapping that sets `index: true`. +mapping that sets `index: true` which is the default option. [discrete] [[tune-approximate-knn-for-speed-accuracy]] @@ -199,9 +195,7 @@ PUT byte-image-index "byte-image-vector": { "type": "dense_vector", "element_type": "byte", - "dims": 2, - "index": true, - "similarity": "cosine" + "dims": 2 }, "title": { "type": "text" @@ -516,9 +510,7 @@ PUT passage_vectors "properties": { "vector": { "type": "dense_vector", - "dims": 2, - "index": true, - "similarity": "cosine" + "dims": 2 }, "text": { "type": "text", @@ -877,7 +869,6 @@ PUT image-index "image-vector": { "type": "dense_vector", "dims": 3, - "index": true, "similarity": "l2_norm", "index_options": { "type": "hnsw", @@ -912,8 +903,8 @@ the global top `k` matches across shards. You cannot set the To run an exact kNN search, use a `script_score` query with a vector function. . Explicitly map one or more `dense_vector` fields. If you don't intend to use -the field for approximate kNN, omit the `index` mapping option or set it to -`false`. This can significantly improve indexing speed. +the field for approximate kNN, set the `index` mapping option to `false`. This +can significantly improve indexing speed. + [source,console] ---- diff --git a/docs/reference/search/search-your-data/search-api.asciidoc b/docs/reference/search/search-your-data/search-api.asciidoc index f3e271918b9b2..496812a0cedb4 100644 --- a/docs/reference/search/search-your-data/search-api.asciidoc +++ b/docs/reference/search/search-your-data/search-api.asciidoc @@ -440,6 +440,17 @@ GET my-index-000001/_search Finally you can force an accurate count by setting `"track_total_hits"` to `true` in the request. +[TIP] +========================================= +The `track_total_hits` parameter allows you to trade hit count accuracy for performance. 
+In general, the lower the value of `track_total_hits`, the faster the query will be,
+with `false` returning the fastest results.
+Setting `track_total_hits` to `true` will cause {es} to return exact hit counts, which could
+hurt query performance because it disables the
+https://www.elastic.co/blog/faster-retrieval-of-top-hits-in-elasticsearch-with-block-max-wand[Max WAND]
+optimization.
+=========================================
+
[discrete]
[[quickly-check-for-matching-docs]]
=== Quickly check for matching docs
diff --git a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc
index 164beb221cd4f..0bee9533cd358 100644
--- a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc
+++ b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc
@@ -45,7 +45,7 @@ you must provide suitably sized nodes yourself.

First, the mapping of the destination index - the index that contains the
tokens that the model created based on your text - must be created. The
destination index must have a field with the
-<> or <> field
+<> or <> field
type to index the ELSER output.

NOTE: ELSER output must be ingested into a field with the `sparse_vector` or
@@ -72,11 +72,11 @@ PUT my-index
}
----
// TEST[skip:TBD]
-<1> The name of the field to contain the generated tokens. It must be refrenced
+<1> The name of the field to contain the generated tokens. It must be referenced
in the {infer} pipeline configuration in the next step.
<2> The field to contain the tokens is a `sparse_vector` field.
-<3> The name of the field from which to create the sparse vector representation.
-In this example, the name of the field is `content`. It must be referenced in the
+<3> The name of the field from which to create the sparse vector representation.
+In this example, the name of the field is `content`. It must be referenced in the
{infer} pipeline configuration in the next step.
<4> The field type which is text in this example.

@@ -93,24 +93,24 @@ that is being ingested in the pipeline.

[source,console]
----
-PUT _ingest/pipeline/elser-v2-test
-{
-  "processors": [
-    {
-      "inference": {
-        "model_id": ".elser_model_2",
-        "input_output": [ <1>
-          {
-            "input_field": "content",
-            "output_field": "content_embedding"
-          }
-        ]
-      }
-    }
-  ]
+PUT _ingest/pipeline/elser-v2-test
+{
+  "processors": [
+    {
+      "inference": {
+        "model_id": ".elser_model_2",
+        "input_output": [ <1>
+          {
+            "input_field": "content",
+            "output_field": "content_embedding"
+          }
+        ]
+      }
+    }
+  ]
}
----
-<1> Configuration object that defines the `input_field` for the {infer} process
+<1> Configuration object that defines the `input_field` for the {infer} process
and the `output_field` that will contain the {infer} results.

////
@@ -137,8 +137,8 @@ https://github.com/elastic/stack-docs/blob/main/docs/en/stack/ml/nlp/data/msmarc

Download the file and upload it to your cluster using the
{kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[Data Visualizer]
-in the {ml-app} UI. Assign the name `id` to the first column and `content` to
-the second column. The index name is `test-data`. Once the upload is complete,
+in the {ml-app} UI. Assign the name `id` to the first column and `content` to
+the second column. The index name is `test-data`. Once the upload is complete,
you can see an index named `test-data` with 182469 documents.

@@ -184,9 +184,9 @@ follow the progress.
[[text-expansion-query]] ==== Semantic search by using the `text_expansion` query -To perform semantic search, use the `text_expansion` query, and provide the -query text and the ELSER model ID. The example below uses the query text "How to -avoid muscle soreness after running?", the `content_embedding` field contains +To perform semantic search, use the `text_expansion` query, and provide the +query text and the ELSER model ID. The example below uses the query text "How to +avoid muscle soreness after running?", the `content_embedding` field contains the generated ELSER output: [source,console] @@ -208,9 +208,9 @@ GET my-index/_search The result is the top 10 documents that are closest in meaning to your query text from the `my-index` index sorted by their relevancy. The result also contains the extracted tokens for each of the relevant search results with their -weights. Tokens are learned associations capturing relevance, they are not -synonyms. To learn more about what tokens are, refer to -{ml-docs}/ml-nlp-elser.html#elser-tokens[this page]. It is possible to exclude +weights. Tokens are learned associations capturing relevance, they are not +synonyms. To learn more about what tokens are, refer to +{ml-docs}/ml-nlp-elser.html#elser-tokens[this page]. It is possible to exclude tokens from source, refer to <> to learn more. [source,consol-result] @@ -253,9 +253,6 @@ tokens from source, refer to <> to learn more. ---- // NOTCONSOLE -To learn about optimizing your `text_expansion` query, refer to -<>. - [discrete] [[text-expansion-compound-query]] @@ -281,7 +278,7 @@ GET my-index/_search "bool": { <1> "should": [ { - "text_expansion": { + "text_expansion": { "content_embedding": { "model_text": "How to avoid muscle soreness after running?", "model_id": ".elser_model_2", @@ -333,12 +330,12 @@ WARNING: Reindex uses the document source to populate the destination index. space-saving optimsation that should only be applied if you are certain that reindexing will not be required in the future! It's important to carefully consider this trade-off and make sure that excluding the ELSER terms from the -source aligns with your specific requirements and use case. Review the -<> and <> sections carefully to learn +source aligns with your specific requirements and use case. Review the +<> and <> sections carefully to learn more about the possible consequences of excluding the tokens from the `_source`. -The mapping that excludes `content_embedding` from the `_source` field can be -created by the following API call: +The mapping that excludes `content_embedding` from the `_source` field can be +created by the following API call: [source,console] ---- @@ -352,10 +349,10 @@ PUT my-index }, "properties": { "content_embedding": { - "type": "sparse_vector" + "type": "sparse_vector" }, - "content": { - "type": "text" + "content": { + "type": "text" } } } @@ -363,6 +360,10 @@ PUT my-index ---- // TEST[skip:TBD] +[NOTE] +==== +Depending on your data, the text expansion query may be faster with `track_total_hits: false`. 
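+
+For example, a sketch using the index and model from this page:
+
+[source,console]
+----
+GET my-index/_search
+{
+  "track_total_hits": false,
+  "query": {
+    "text_expansion": {
+      "content_embedding": {
+        "model_id": ".elser_model_2",
+        "model_text": "How to avoid muscle soreness after running?"
+      }
+    }
+  }
+}
+----
+// TEST[skip:TBD]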
+==== [discrete] [[further-reading]] diff --git a/docs/reference/security/authentication/jwt-realm.asciidoc b/docs/reference/security/authentication/jwt-realm.asciidoc index 142c93286c2e9..68e20380449a5 100644 --- a/docs/reference/security/authentication/jwt-realm.asciidoc +++ b/docs/reference/security/authentication/jwt-realm.asciidoc @@ -123,8 +123,9 @@ Instructs the realm to treat and validate incoming JWTs as ID Tokens (`id_token` Specifies the client authentication type as `shared_secret`, which means that the client is authenticated using an HTTP request header that must match a pre-configured secret value. The client must provide this shared secret with -every request in the `ES-Client-Authentication` header. The header value must be a -case-sensitive match to the realm's `client_authentication.shared_secret`. +every request in the `ES-Client-Authentication` header and using the +`SharedSecret` scheme. The header value must be a case-sensitive match +to the realm's `client_authentication.shared_secret`. `allowed_issuer`:: Sets a verifiable identifier for your JWT issuer. This value is typically a @@ -519,6 +520,7 @@ After mapping the roles, you can make an <> to {es} using a JWT and include the `ES-Client-Authentication` header: +[[jwt-auth-shared-secret-scheme-example]] [source,sh] ---- curl -s -X GET -H "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJhdWQiOlsiZXMwMSIsImVzMDIiLCJlczAzIl0sInN1YiI6InVzZXIyIiwiaXNzIjoibXktaXNzdWVyIiwiZXhwIjo0MDcwOTA4ODAwLCJpYXQiOjk0NjY4NDgwMCwiZW1haWwiOiJ1c2VyMkBzb21ldGhpbmcuZXhhbXBsZS5jb20ifQ.UgO_9w--EoRyUKcWM5xh9SimTfMzl1aVu6ZBsRWhxQA" -H "ES-Client-Authentication: sharedsecret test-secret" https://localhost:9200/_security/_authenticate diff --git a/docs/reference/setup/important-settings/path-settings.asciidoc b/docs/reference/setup/important-settings/path-settings.asciidoc index 0b46a35db7262..3e87d504963a2 100644 --- a/docs/reference/setup/important-settings/path-settings.asciidoc +++ b/docs/reference/setup/important-settings/path-settings.asciidoc @@ -127,6 +127,6 @@ double the size of your cluster so it will only work if you have the capacity to expand your cluster like this. If you currently use multiple data paths but your cluster is not highly -available then the you can migrate to a non-deprecated configuration by taking +available then you can migrate to a non-deprecated configuration by taking a snapshot, creating a new cluster with the desired configuration and restoring the snapshot into it. diff --git a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc index 8269ba376f878..2b2090405af60 100644 --- a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc @@ -59,12 +59,13 @@ the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a -`blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, and a -`max_total_data_size` of at least `1tb`. Always specify a generous timeout, -possibly `1h` or longer, to allow time for each analysis to run to completion. 
-Perform the analyses using a multi-node cluster of a similar size to your
-production cluster so that it can detect any problems that only arise when the
-repository is accessed by many nodes at once.
+`blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a
+`max_total_data_size` of at least `1tb`, and a `register_operation_count` of at
+least `100`. Always specify a generous timeout, possibly `1h` or longer, to
+allow time for each analysis to run to completion. Perform the analyses using a
+multi-node cluster of a similar size to your production cluster so that it can
+detect any problems that only arise when the repository is accessed by many
+nodes at once.

If the analysis fails then {es} detected that your repository behaved
unexpectedly. This usually means you are using a third-party storage system
@@ -141,8 +142,10 @@ between versions. The request parameters and response format depend on details
of the implementation so may also be different in newer versions.

The analysis comprises a number of blob-level tasks, as set by the `blob_count`
-parameter. The blob-level tasks are distributed over the data and
-master-eligible nodes in the cluster for execution.
+parameter, and a number of compare-and-exchange operations on linearizable
+registers, as set by the `register_operation_count` parameter. These tasks are
+distributed over the data and master-eligible nodes in the cluster for
+execution.

For most blob-level tasks, the executing node first writes a blob to the
repository, and then instructs some of the other nodes in the cluster to
@@ -175,6 +178,20 @@ complete. In this case it still instructs some of the other nodes in the
cluster to attempt to read the blob, but all of these reads must fail to find
the blob.

+Linearizable registers are special blobs that {es} manipulates using an atomic
+compare-and-exchange operation. This operation ensures correct and
+strongly-consistent behavior even when the blob is accessed by multiple nodes
+at the same time. The detailed implementation of the compare-and-exchange
+operation on linearizable registers varies by repository type. Repository
+analysis verifies that uncontended compare-and-exchange operations on a
+linearizable register blob always succeed. Repository analysis also verifies
+that contended operations either succeed or report the contention but do not
+return incorrect results. If an operation fails due to contention, {es} retries
+the operation until it succeeds. Most of the compare-and-exchange operations
+performed by repository analysis atomically increment a counter, which is
+represented as an 8-byte blob. Some operations also verify the behavior on
+small blobs with sizes other than 8 bytes.
+
[[repo-analysis-api-path-params]]
==== {api-path-parms-title}

@@ -200,6 +217,11 @@ this to at least `2gb`.
the blobs written during the test. Defaults to `1gb`. For realistic experiments
you should set this to at least `1tb`.

+`register_operation_count`::
+(Optional, integer) The minimum number of linearizable register operations to
+perform in total. Defaults to `10`. For realistic experiments you should set
+this to at least `100`.
+
`timeout`::
(Optional, <>) Specifies the period of time to wait for the test to complete.
If no response is received before the timeout expires, diff --git a/docs/reference/snapshot-restore/repository-azure.asciidoc b/docs/reference/snapshot-restore/repository-azure.asciidoc index e848ec9620cb4..35cf454906050 100644 --- a/docs/reference/snapshot-restore/repository-azure.asciidoc +++ b/docs/reference/snapshot-restore/repository-azure.asciidoc @@ -257,3 +257,15 @@ following naming rules: permitted in container names. * All letters in a container name must be lowercase. * Container names must be from 3 through 63 characters long. + +[[repository-azure-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for Azure repositories is based on +Azure's support for strongly consistent leases. Each lease may only be held by +a single node at any time. The node presents its lease when performing a read +or write operation on a protected blob. Lease-protected operations fail if the +lease is invalid or expired. To perform a compare-and-exchange operation on a +register, {es} first obtains a lease on the blob, then reads the blob contents +under the lease, and finally uploads the updated blob under the same lease. +This process ensures that the read and write operations happen atomically. diff --git a/docs/reference/snapshot-restore/repository-gcs.asciidoc b/docs/reference/snapshot-restore/repository-gcs.asciidoc index d99b9bc81567f..b359952715a73 100644 --- a/docs/reference/snapshot-restore/repository-gcs.asciidoc +++ b/docs/reference/snapshot-restore/repository-gcs.asciidoc @@ -275,3 +275,13 @@ The service account used to access the bucket must have the "Writer" access to t 3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. 4. Select the bucket and "Edit bucket permission". 5. The service account must be configured as a "User" with "Writer" access. + +[[repository-gcs-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for GCS repositories is based on GCS's +support for strongly consistent preconditions on put-blob operations. To +perform a compare-and-exchange operation on a register, {es} retrieves the +register blob and its current generation, and then uploads the updated blob +using the observed generation as its precondition. The precondition ensures +that the generation has not changed in the meantime. diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 70993f5b515b3..032d4f47bf678 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -12,7 +12,7 @@ https://www.elastic.co/cloud/.* To register an S3 repository, specify the type as `s3` when creating the repository. The repository defaults to using https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS -IAM Role] credentials for authentication. You can also use <> Kubernetes service accounts. +IAM Role] credentials for authentication. You can also use <> for authentication. The only mandatory setting is the bucket name: @@ -133,6 +133,12 @@ settings belong in the `elasticsearch.yml` file. The port of a proxy to connect to S3 through. +`proxy.scheme`:: + + The scheme to use for the proxy connection to S3. Valid values are either `http` or `https`. + Defaults to `http`. 
This setting allows to specify the protocol used for communication with the + proxy server + `proxy.username` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: The username to connect to the `proxy.host` with. @@ -198,75 +204,6 @@ pattern then you should set this setting to `true` when upgrading. https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/ClientConfiguration.html#setSignerOverride-java.lang.String-[AWS Java SDK documentation] for details. Defaults to empty string which means that no signing algorithm override will be used. -[discrete] -[[repository-s3-compatible-services]] -===== S3-compatible services - -There are a number of storage systems that provide an S3-compatible API, and -the `repository-s3` type allows you to use these systems in place of AWS S3. -To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the -system's endpoint. This setting accepts IP addresses and hostnames and may -include a port. For example, the endpoint may be `172.17.0.2` or -`172.17.0.2:9000`. - -By default {es} communicates with your storage system using HTTPS, and -validates the repository's certificate chain using the JVM-wide truststore. -Ensure that the JVM-wide truststore includes an entry for your repository. If -you wish to use unsecured HTTP communication instead of HTTPS, set -`s3.client.CLIENT_NAME.protocol` to `http`. - -https://minio.io[MinIO] is an example of a storage system that provides an -S3-compatible API. The `repository-s3` type allows {es} to work with -MinIO-backed repositories as well as repositories stored on AWS S3. Other -S3-compatible storage systems may also work with {es}, but these are not -covered by the {es} test suite. - -Note that some storage systems claim to be S3-compatible but do not faithfully -emulate S3's behaviour in full. The `repository-s3` type requires full -compatibility with S3. In particular it must support the same set of API -endpoints, return the same errors in case of failures, and offer consistency and -performance at least as good as S3 even when accessed concurrently by multiple -nodes. You will need to work with the supplier of your storage system to address -any incompatibilities you encounter. Please do not report {es} issues involving -storage systems which claim to be S3-compatible unless you can demonstrate that -the same issue exists when using a genuine AWS S3 repository. - -You can perform some basic checks of the suitability of your storage system -using the {ref}/repo-analysis-api.html[repository analysis API]. If this API -does not complete successfully, or indicates poor performance, then your -storage system is not fully compatible with AWS S3 and therefore unsuitable for -use as a snapshot repository. However, these checks do not guarantee full -compatibility. - -Most storage systems can be configured to log the details of their interaction -with {es}. If you are investigating a suspected incompatibility with AWS S3, it -is usually simplest to collect these logs and provide them to the supplier of -your storage system for further analysis. 
If the incompatibility is not clear -from the logs emitted by the storage system, configure {es} to log every -request it makes to the S3 API by <> of the `com.amazonaws.request` logger to `DEBUG`: - -[source,console] ----- -PUT /_cluster/settings -{ - "persistent": { - "logger.com.amazonaws.request": "DEBUG" - } -} ----- -// TEST[skip:we don't really want to change this logger] - -Collect the Elasticsearch logs covering the time period of the failed analysis -from all nodes in your cluster and share them with the supplier of your storage -system along with the analysis response so they can use them to determine the -problem. See the -https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-logging.html[AWS Java SDK] -documentation for further information, including details about other loggers -that can be used to obtain even more verbose logs. When you have finished -collecting the logs needed by your supplier, set the logger settings back to -`null` to return to the default logging configuration. See <> -and <> for more information. [[repository-s3-repository]] ==== Repository settings @@ -401,7 +338,7 @@ This sets up a repository that uses all client settings from the client `my.s3.endpoint` by the repository settings. [[repository-s3-permissions]] -===== Recommended S3 permissions +==== Recommended S3 permissions In order to restrict the Elasticsearch snapshot process to the minimum required resources, we recommend using Amazon IAM in conjunction with pre-existing S3 @@ -493,7 +430,28 @@ bucket, in this example, named "foo". The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository registration will fail. -===== Cleaning up multi-part uploads +[[iam-kubernetes-service-accounts]] +[discrete] +===== Using IAM roles for Kubernetes service accounts for authentication + +If you want to use https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/[Kubernetes service accounts] +for authentication, you need to add a symlink to the `$AWS_WEB_IDENTITY_TOKEN_FILE` environment variable +(which should be automatically set by a Kubernetes pod) in the S3 repository config directory, so the repository +can have read access for the service account (a repository can't read any files outside its config directory). +For example: + +[source,bash] +---- +mkdir -p "${ES_PATH_CONF}/repository-s3" +ln -s $AWS_WEB_IDENTITY_TOKEN_FILE "${ES_PATH_CONF}/repository-s3/aws-web-identity-token-file" +---- + +IMPORTANT: The symlink must be created on all data and master-eligible nodes and be readable +by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using uid:gid `1000:0`. + +If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. + +==== Cleaning up multi-part uploads {es} uses S3's multi-part upload process to upload larger blobs to the repository. The multi-part upload process works by dividing each blob into @@ -521,7 +479,6 @@ a bucket lifecycle policy] to automatically abort incomplete uploads once they reach a certain age. [[repository-s3-aws-vpc]] -[discrete] ==== AWS VPC bandwidth settings AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch @@ -537,23 +494,81 @@ bandwidth of your VPC's NAT instance. Instances residing in a public subnet in an AWS VPC will connect to S3 via the VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance.
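As a sanity check for the Kubernetes service-account symlink described above, something along the following lines can confirm on each node that the symlink resolves and that the token is readable by the `elasticsearch` user (an illustrative sketch, assuming `ES_PATH_CONF` is set as in the example and that `sudo` is available):

[source,sh]
----
# Confirm the symlink resolves to the mounted web identity token.
readlink -f "${ES_PATH_CONF}/repository-s3/aws-web-identity-token-file"

# Confirm the token is readable by the elasticsearch user.
sudo -u elasticsearch head -c 1 \
  "${ES_PATH_CONF}/repository-s3/aws-web-identity-token-file" > /dev/null \
  && echo "web identity token is readable"
----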
+[[repository-s3-compatible-services]] +==== S3-compatible services -[[iam-kubernetes-service-accounts]] -[discrete] -==== Using IAM roles for Kubernetes service accounts for authentication -If you want to use https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/[Kubernetes service accounts] -for authentication, you need to add a symlink to the `$AWS_WEB_IDENTITY_TOKEN_FILE` environment variable -(which should be automatically set by a Kubernetes pod) in the S3 repository config directory, so the repository -can have the read access for the service account (a repository can't read any files outside its config directory). -For example: +There are a number of storage systems that provide an S3-compatible API, and +the `repository-s3` type allows you to use these systems in place of AWS S3. +To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the +system's endpoint. This setting accepts IP addresses and hostnames and may +include a port. For example, the endpoint may be `172.17.0.2` or +`172.17.0.2:9000`. -[source,bash] +By default {es} communicates with your storage system using HTTPS, and +validates the repository's certificate chain using the JVM-wide truststore. +Ensure that the JVM-wide truststore includes an entry for your repository. If +you wish to use unsecured HTTP communication instead of HTTPS, set +`s3.client.CLIENT_NAME.protocol` to `http`. + +https://minio.io[MinIO] is an example of a storage system that provides an +S3-compatible API. The `repository-s3` type allows {es} to work with +MinIO-backed repositories as well as repositories stored on AWS S3. Other +S3-compatible storage systems may also work with {es}, but these are not +covered by the {es} test suite. + +Note that some storage systems claim to be S3-compatible but do not faithfully +emulate S3's behaviour in full. The `repository-s3` type requires full +compatibility with S3. In particular it must support the same set of API +endpoints, return the same errors in case of failures, and offer consistency and +performance at least as good as S3 even when accessed concurrently by multiple +nodes. You will need to work with the supplier of your storage system to address +any incompatibilities you encounter. Please do not report {es} issues involving +storage systems which claim to be S3-compatible unless you can demonstrate that +the same issue exists when using a genuine AWS S3 repository. + +You can perform some basic checks of the suitability of your storage system +using the {ref}/repo-analysis-api.html[repository analysis API]. If this API +does not complete successfully, or indicates poor performance, then your +storage system is not fully compatible with AWS S3 and therefore unsuitable for +use as a snapshot repository. However, these checks do not guarantee full +compatibility. + +Most storage systems can be configured to log the details of their interaction +with {es}. If you are investigating a suspected incompatibility with AWS S3, it +is usually simplest to collect these logs and provide them to the supplier of +your storage system for further analysis. 
If the incompatibility is not clear +from the logs emitted by the storage system, configure {es} to log every +request it makes to the S3 API by <> of the `com.amazonaws.request` logger to `DEBUG`: + +[source,console] ---- -mkdir -p "${ES_PATH_CONF}/repository-s3" -ln -s $AWS_WEB_IDENTITY_TOKEN_FILE "${ES_PATH_CONF}/repository-s3/aws-web-identity-token-file" +PUT /_cluster/settings +{ + "persistent": { + "logger.com.amazonaws.request": "DEBUG" + } +} ---- +// TEST[skip:we don't really want to change this logger] -IMPORTANT: The symlink must be created on all data and master eligible nodes and be readable -by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using uid:gid `1000:0`. +Collect the Elasticsearch logs covering the time period of the failed analysis +from all nodes in your cluster and share them with the supplier of your storage +system along with the analysis response so they can use them to determine the +problem. See the +https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-logging.html[AWS Java SDK] +documentation for further information, including details about other loggers +that can be used to obtain even more verbose logs. When you have finished +collecting the logs needed by your supplier, set the logger settings back to +`null` to return to the default logging configuration. See <> +and <> for more information. -If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. +[[repository-s3-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for S3 repositories is based on the +strongly consistent semantics of the multipart upload API. {es} first creates a +multipart upload to indicate its intention to perform a linearizable register +operation. {es} then lists and cancels all other multipart uploads for the same +register. {es} then attempts to complete the upload. If the upload completes +successfully then the compare-and-exchange operation was atomic. diff --git a/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc b/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc index 0bd64d43f1381..6be49d9d4422f 100644 --- a/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc +++ b/docs/reference/snapshot-restore/repository-shared-file-system.asciidoc @@ -84,3 +84,12 @@ each node, but for these accounts to have different numeric user or group IDs. If your shared file system uses NFS then ensure that every node is running with the same numeric UID and GID, or else update your NFS configuration to account for the variance in numeric IDs across nodes. + +[[repository-fs-linearizable-registers]] +==== Linearizable register implementation + +The linearizable register implementation for shared filesystem repositories is +based around file locking. To perform a compare-and-exchange operation on a +register, {es} first locks the underlying file and then writes the updated +contents under the same lock. This ensures that the file has not changed in the +meantime.
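All four register implementations above reduce to the same abstract operation: an atomic compare-and-exchange on a small register blob. A minimal sketch of the optimistic, generation-based variant described for GCS repositories might look like the following Java; the `RegisterStore` interface and every name in it are hypothetical illustrations, not the actual {es} repository API:

[source,java]
----
import java.util.Arrays;

// Hypothetical versioned-blob store in the style of GCS generation preconditions.
interface RegisterStore {
    record Versioned(byte[] value, long generation) {}

    // Read the register blob together with its current generation.
    Versioned read(String key);

    // Write newValue only if the stored generation still equals expectedGeneration;
    // returns false if a concurrent writer moved the generation on.
    boolean writeIfGeneration(String key, byte[] newValue, long expectedGeneration);
}

final class CasRegister {
    private final RegisterStore store;

    CasRegister(RegisterStore store) {
        this.store = store;
    }

    // Single compare-and-exchange attempt: replaces `expected` with `updated`
    // atomically, failing if the register held another value or if a writer
    // raced in between the read and the conditional write.
    boolean compareAndExchange(String key, byte[] expected, byte[] updated) {
        RegisterStore.Versioned current = store.read(key);
        if (Arrays.equals(current.value(), expected) == false) {
            return false; // register holds a different value
        }
        // The generation precondition is what makes read-then-write atomic: if
        // the blob changed since the read, the conditional write is rejected.
        return store.writeIfGeneration(key, updated, current.generation());
    }
}
----

The lease-based (Azure), multipart-upload-based (S3), and file-locking (shared filesystem) variants differ only in how the store enforces that precondition.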
diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc index 1f986bfea8c70..1dee7f0840ade 100644 --- a/docs/reference/sql/language/indices.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -111,13 +111,13 @@ Explicitly perform the inclusion through the dedicated `FROZEN` keyword in the ` [source, sql] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesIncludeFrozen] +include-tagged::{sql-specs}/docs/docs-frozen.csv-spec[showTablesIncludeFrozen] ---- [source, sql] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableIncludeFrozen] +include-tagged::{sql-specs}/docs/docs-frozen.csv-spec[fromTableIncludeFrozen] ---- Unless enabled, frozen indices are completely ignored; it is as if they do not exist and as such, queries ran against them are likely to fail. diff --git a/docs/reference/tab-widgets/api-call-widget.asciidoc b/docs/reference/tab-widgets/api-call-widget.asciidoc new file mode 100644 index 0000000000000..adc2aa86f1c0e --- /dev/null +++ b/docs/reference/tab-widgets/api-call-widget.asciidoc @@ -0,0 +1,40 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::api-call.asciidoc[tag=cloud] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/api-call.asciidoc b/docs/reference/tab-widgets/api-call.asciidoc new file mode 100644 index 0000000000000..ecbd49eae7f8f --- /dev/null +++ b/docs/reference/tab-widgets/api-call.asciidoc @@ -0,0 +1,57 @@ +// tag::cloud[] +**Use {kib}** + +//tag::kibana-api-ex[] +. Open {kib}'s main menu ("*☰*" near Elastic logo) and go to **Dev Tools > Console**. ++ +[role="screenshot"] +image::images/kibana-console.png[{kib} Console,align="center"] + +. Run the following test API request in Console: ++ +[source,console] +---- +GET / +---- + +//end::kibana-api-ex[] + +**Use curl** + +To communicate with {es} using curl or another client, you need your cluster's +endpoint. + +. Open {kib}'s main menu and click **Manage this deployment**. + +. From your deployment menu, go to the **Elasticsearch** page. Click **Copy +endpoint**. + +. To submit an example API request, run the following curl command in a new +terminal session. Replace `` with the password for the `elastic` user. +Replace `` with your endpoint. ++ +[source,sh] +---- +curl -u elastic: / +---- +// NOTCONSOLE + +// end::cloud[] + +// tag::self-managed[] +**Use {kib}** + +include::api-call.asciidoc[tag=kibana-api-ex] + +**Use curl** + +To submit an example API request, run the following curl command in a new +terminal session. + +[source,sh] +---- +curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 +---- +// NOTCONSOLE + +// end::self-managed[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/code.asciidoc b/docs/reference/tab-widgets/code.asciidoc new file mode 100644 index 0000000000000..a6949b681edc6 --- /dev/null +++ b/docs/reference/tab-widgets/code.asciidoc @@ -0,0 +1,163 @@ +// Defining styles and script here for simplicity. +++++ + + +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/esql/esql-getting-started.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc similarity index 89% rename from docs/reference/tab-widgets/esql/esql-getting-started.asciidoc rename to docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc index 0ebcb7c92e59f..b8998ef199c99 100644 --- a/docs/reference/tab-widgets/esql/esql-getting-started.asciidoc +++ b/docs/reference/tab-widgets/esql/esql-getting-started-discover-console.asciidoc @@ -34,6 +34,9 @@ FROM sample_data include::../../esql/esql-kibana.asciidoc[tag=esql-mode] +Adjust the time filter so it includes the timestamps in the sample data (October +23rd, 2023). + After switching to {esql} mode, the query bar shows a sample query. You can replace this query with the queries in this getting started guide. diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc new file mode 100644 index 0000000000000..a1898dffda684 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc @@ -0,0 +1,66 @@ +// tag::own-deployment[] + +The following requests create and execute a policy called `clientip_policy`. 
The +policy links an IP address to an environment ("Development", "QA", or +"Production"): + +[source,console] +---- +PUT clientips +{ + "mappings": { + "properties": { + "client.ip": { + "type": "keyword" + }, + "env": { + "type": "keyword" + } + } + } +} + +PUT clientips/_bulk +{ "index" : {}} +{ "client.ip": "172.21.0.5", "env": "Development" } +{ "index" : {}} +{ "client.ip": "172.21.2.113", "env": "QA" } +{ "index" : {}} +{ "client.ip": "172.21.2.162", "env": "QA" } +{ "index" : {}} +{ "client.ip": "172.21.3.15", "env": "Production" } +{ "index" : {}} +{ "client.ip": "172.21.3.16", "env": "Production" } + +PUT /_enrich/policy/clientip_policy +{ + "match": { + "indices": "clientips", + "match_field": "client.ip", + "enrich_fields": ["env"] + } +} + +PUT /_enrich/policy/clientip_policy/_execute?wait_for_completion=false +---- +// TEST[s/\?wait_for_completion=false//] + +//// +[source,console] +---- +DELETE /_enrich/policy/clientip_policy +---- +// TEST[continued] +//// + +// end::own-deployment[] + + +// tag::demo-env[] + +On the demo environment at https://esql.demo.elastic.co/[esql.demo.elastic.co], +an enrich policy called `clientip_policy` has already been created and executed. +The policy links an IP address to an environment ("Development", "QA", or +"Production"). + +// end::demo-env[] diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc new file mode 100644 index 0000000000000..434954d8d400a --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc @@ -0,0 +1,48 @@ +// tag::own-deployment[] + +First ingest some sample data. In {kib}, open the main menu and select *Dev +Tools*. Run the following two requests: + +[source,console] +---- +PUT sample_data +{ + "mappings": { + "properties": { + "client.ip": { + "type": "ip" + }, + "message": { + "type": "keyword" + } + } + } +} + +PUT sample_data/_bulk +{"index": {}} +{"@timestamp": "2023-10-23T12:15:03.360Z", "client.ip": "172.21.2.162", "message": "Connected to 10.1.0.3", "event.duration": 3450233} +{"index": {}} +{"@timestamp": "2023-10-23T12:27:28.948Z", "client.ip": "172.21.2.113", "message": "Connected to 10.1.0.2", "event.duration": 2764889} +{"index": {}} +{"@timestamp": "2023-10-23T13:33:34.937Z", "client.ip": "172.21.0.5", "message": "Disconnected", "event.duration": 1232382} +{"index": {}} +{"@timestamp": "2023-10-23T13:51:54.732Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 725448} +{"index": {}} +{"@timestamp": "2023-10-23T13:52:55.015Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 8268153} +{"index": {}} +{"@timestamp": "2023-10-23T13:53:55.832Z", "client.ip": "172.21.3.15", "message": "Connection error", "event.duration": 5033755} +{"index": {}} +{"@timestamp": "2023-10-23T13:55:01.543Z", "client.ip": "172.21.3.15", "message": "Connected to 10.1.0.1", "event.duration": 1756467} +---- + +// end::own-deployment[] + + +// tag::demo-env[] + +The data set used in this guide has been preloaded into the Elastic {esql} +public demo environment. Visit +https://esql.demo.elastic.co/[esql.demo.elastic.co] to start using it.
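With the policy executed and the sample data indexed (locally or on the demo environment), the enrichment can be exercised with a query along these lines, following the {esql} `ENRICH` syntax; the `TO_STRING` conversion is needed because `client.ip` is mapped as `ip` in `sample_data` while the policy's match field is a `keyword`:

[source,esql]
----
FROM sample_data
| EVAL client.ip = TO_STRING(client.ip)
| ENRICH clientip_policy ON client.ip WITH env
| STATS count = COUNT(*) BY env
----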
+ +// end::demo-env[] diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc similarity index 72% rename from docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc rename to docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc index 49dc573f3b0bb..dff80e25812c3 100644 --- a/docs/reference/tab-widgets/esql/esql-getting-started-widget.asciidoc +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-discover-console.asciidoc @@ -1,6 +1,6 @@ ++++ -
    -
    +
    +
    @@ -31,7 +31,7 @@ include::esql-getting-started.asciidoc[tag=console] hidden=""> ++++ -include::esql-getting-started.asciidoc[tag=discover] +include::esql-getting-started-discover-console.asciidoc[tag=discover] ++++
    diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc new file mode 100644 index 0000000000000..cafefeb2652e4 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc @@ -0,0 +1,39 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::esql-getting-started-enrich-policy.asciidoc[tag=own-deployment] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc new file mode 100644 index 0000000000000..4a33cf3f08866 --- /dev/null +++ b/docs/reference/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc @@ -0,0 +1,39 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::esql-getting-started-sample-data.asciidoc[tag=own-deployment] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/quick-start-install-widget.asciidoc b/docs/reference/tab-widgets/quick-start-install-widget.asciidoc new file mode 100644 index 0000000000000..f3ff804ade255 --- /dev/null +++ b/docs/reference/tab-widgets/quick-start-install-widget.asciidoc @@ -0,0 +1,40 @@ +++++ +
    +
    + + +
    +
    +++++ + +include::quick-start-install.asciidoc[tag=cloud] + +++++ +
    + +
    +++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/quick-start-install.asciidoc b/docs/reference/tab-widgets/quick-start-install.asciidoc new file mode 100644 index 0000000000000..b8daf62dad63b --- /dev/null +++ b/docs/reference/tab-widgets/quick-start-install.asciidoc @@ -0,0 +1,71 @@ + +// tag::cloud[] +include::{docs-root}/shared/cloud/ess-getting-started.asciidoc[tag=generic] + +. Click **Continue** to open {kib}, the user interface for {ecloud}. + +. Click **Explore on my own**. +// end::cloud[] + +// tag::self-managed[] +*Start a single-node cluster* + +We'll use a single-node {es} cluster in this quick start, which makes sense for testing and development. +Refer to <> for advanced Docker documentation. + +. Run the following Docker commands: ++ +[source,sh,subs="attributes"] +---- +docker network create elastic +docker pull {docker-image} +docker run --name es01 --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t {docker-image} +---- + +. Copy the generated `elastic` password and enrollment token, which are output to your terminal. +You'll use these to enroll {kib} with your {es} cluster and log in. +These credentials are only shown when you start {es} for the first time. ++ +We recommend storing the `elastic` password as an environment variable in your shell. Example: ++ +[source,sh] +---- +export ELASTIC_PASSWORD="your_password" +---- ++ +. Copy the `http_ca.crt` SSL certificate from the container to your local machine. ++ +[source,sh] +---- +docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt . +---- ++ +. Make a REST API call to {es} to ensure the {es} container is running. ++ +[source,sh] +---- +curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 +---- +// NOTCONSOLE + +*Run {kib}* + +{kib} is the user interface for Elastic. +It's great for getting started with {es} and exploring your data. +We'll be using the Dev Tools *Console* in {kib} to make REST API calls to {es}. + +In a new terminal session, start {kib} and connect it to your {es} container: + +[source,sh,subs="attributes"] +---- +docker pull {kib-docker-image} +docker run --name kibana --net elastic -p 5601:5601 {kib-docker-image} +---- + +When you start {kib}, a unique URL is output to your terminal. +To access {kib}: + +. Open the generated URL in your browser. +. Paste the enrollment token that you copied earlier, to connect your {kib} instance with {es}. +. Log in to {kib} as the `elastic` user with the password that was generated when you started {es}. 
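If the generated password or enrollment token is lost before you copy it, both can be regenerated with the tools bundled in the container (container name `es01` as in the commands above):

[source,sh]
----
# Reset the password for the elastic user.
docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic

# Generate a fresh enrollment token for Kibana.
docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana
----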
+// end::self-managed[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc index 2fe2f9cea83f9..b702a1fc8f426 100644 --- a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc @@ -63,9 +63,7 @@ PUT my-index "properties": { "my_embeddings.predicted_value": { <1> "type": "dense_vector", <2> - "dims": 384,<3> - "index": true, - "similarity": "cosine" + "dims": 384 <3> }, "my_text_field": { <4> "type": "text" <5> diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index 94ed94df43818..e8d94ce624dbb 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -17,6 +17,7 @@ commons-codec = "commons-codec:commons-codec:1.11" commmons-io = "commons-io:commons-io:2.2" docker-compose = "com.avast.gradle:gradle-docker-compose-plugin:0.17.5" forbiddenApis = "de.thetaphi:forbiddenapis:3.6" +gradle-enterprise = "com.gradle:gradle-enterprise-gradle-plugin:3.14.1" hamcrest = "org.hamcrest:hamcrest:2.1" httpcore = "org.apache.httpcomponents:httpcore:4.4.12" httpclient = "org.apache.httpcomponents:httpclient:4.5.10" diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index e340efb0c6987..ed7ae1b5b5638 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -69,9 +69,9 @@ - - - + + + @@ -381,6 +381,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -1401,19 +1421,19 @@ - - - + + + - - - + + + - - - + + + @@ -2894,14 +2914,9 @@ - - - - - - - - + + + @@ -3002,14 +3017,19 @@ - - - + + + + + + + + - - - + + + @@ -3022,9 +3042,9 @@ - - - + + + @@ -3032,19 +3052,24 @@ - - - - - - - - + + + + + + + + + + + + + @@ -4067,6 +4092,11 @@ + + + + + @@ -4127,6 +4157,11 @@ + + + + + @@ -4157,6 +4192,11 @@ + + + + + diff --git a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java index 04cd4375a42be..ca5704fa9866d 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java @@ -19,6 +19,7 @@ public abstract class AbstractRefCounted implements RefCounted { public static final String ALREADY_CLOSED_MESSAGE = "already closed, can't increment ref count"; + public static final String INVALID_DECREF_MESSAGE = "invalid decRef call: already closed"; private static final VarHandle VH_REFCOUNT_FIELD; @@ -63,7 +64,7 @@ public final boolean tryIncRef() { public final boolean decRef() { touch(); int i = (int) VH_REFCOUNT_FIELD.getAndAdd(this, -1); - assert i > 0 : "invalid decRef call: already closed"; + assert i > 0 : INVALID_DECREF_MESSAGE; if (i == 1) { try { closeInternal(); diff --git a/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java b/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java index 6698b47f62f3c..56325dc21bb4a 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java +++ b/libs/core/src/main/java/org/elasticsearch/core/CheckedConsumer.java @@ -8,12 +8,20 @@ package org.elasticsearch.core; -import java.util.function.Consumer; +import java.util.Objects; /** - * A {@link Consumer}-like interface which allows throwing checked exceptions. + * A {@link java.util.function.Consumer}-like interface which allows throwing checked exceptions. 
*/ @FunctionalInterface public interface CheckedConsumer { void accept(T t) throws E; + + default CheckedConsumer andThen(CheckedConsumer after) throws E { + Objects.requireNonNull(after); + return (T t) -> { + accept(t); + after.accept(t); + }; + } } diff --git a/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java index 0f7dec4968ba7..1f725ac48a16f 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java @@ -38,7 +38,7 @@ public interface RefCounted { void incRef(); /** - * Tries to increment the refCount of this instance. This method will return {@code true} iff the refCount was + * Tries to increment the refCount of this instance. This method will return {@code true} iff the refCount was successfully incremented. * * @see #decRef() * @see #incRef() @@ -62,4 +62,16 @@ public interface RefCounted { * @return whether there are currently any active references to this object. */ boolean hasReferences(); + + /** + * Similar to {@link #incRef()} except that it also asserts that it managed to acquire the ref, for use in situations where it is a bug + * if all refs have been released. + */ + default void mustIncRef() { + if (tryIncRef()) { + return; + } + assert false : AbstractRefCounted.ALREADY_CLOSED_MESSAGE; + incRef(); // throws an ISE + } } diff --git a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java index 7d25b5a6163c1..5153ba688d6a9 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java +++ b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java @@ -18,6 +18,8 @@ public enum RestApiVersion { V_8(8), + + @UpdateForV9 // v9 will not need to support the v7 REST API V_7(7); public final byte major; diff --git a/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java b/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java new file mode 100644 index 0000000000000..2a31e2ccde222 --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/core/UpdateForV9.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.core; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to identify a block of code (a whole class, a method, or a field) that needs to be reviewed (for cleanup, remove or change) + * before releasing 9.0 + */ +@Retention(RetentionPolicy.SOURCE) +@Target({ ElementType.LOCAL_VARIABLE, ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +public @interface UpdateForV9 { +} diff --git a/libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4Tests.java b/libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4Tests.java index 3f7de0bfd4e0e..65a126920cdb9 100644 --- a/libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4Tests.java +++ b/libs/lz4/src/test/java/org/elasticsearch/lz4/ESLZ4Tests.java @@ -1898,80 +1898,4 @@ public void testRoundtripIssue12() { testRoundTrip(data, 9, data.length - 9); } - private static void assertCompressedArrayEquals(String message, byte[] expected, byte[] actual) { - int off = 0; - int decompressedOff = 0; - while (true) { - if (off == expected.length) { - break; - } - final Sequence sequence1 = readSequence(expected, off); - final Sequence sequence2 = readSequence(actual, off); - assertEquals(message + ", off=" + off + ", decompressedOff=" + decompressedOff, sequence1, sequence2); - off += sequence1.length; - decompressedOff += sequence1.literalLen + sequence1.matchLen; - } - } - - private static Sequence readSequence(byte[] buf, int off) { - final int start = off; - final int token = buf[off++] & 0xFF; - int literalLen = token >>> 4; - if (literalLen >= 0x0F) { - int len; - while ((len = buf[off++] & 0xFF) == 0xFF) { - literalLen += 0xFF; - } - literalLen += len; - } - off += literalLen; - if (off == buf.length) { - return new Sequence(literalLen, -1, -1, off - start); - } - int matchDec = (buf[off++] & 0xFF) | ((buf[off++] & 0xFF) << 8); - int matchLen = token & 0x0F; - if (matchLen >= 0x0F) { - int len; - while ((len = buf[off++] & 0xFF) == 0xFF) { - matchLen += 0xFF; - } - matchLen += len; - } - matchLen += 4; - return new Sequence(literalLen, matchDec, matchLen, off - start); - } - - private static class Sequence { - final int literalLen, matchDec, matchLen, length; - - private Sequence(int literalLen, int matchDec, int matchLen, int length) { - this.literalLen = literalLen; - this.matchDec = matchDec; - this.matchLen = matchLen; - this.length = length; - } - - @Override - public String toString() { - return "Sequence [literalLen=" + literalLen + ", matchDec=" + matchDec + ", matchLen=" + matchLen + "]"; - } - - @Override - public int hashCode() { - return 42; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null) return false; - if (getClass() != obj.getClass()) return false; - Sequence other = (Sequence) obj; - if (literalLen != other.literalLen) return false; - if (matchDec != other.matchDec) return false; - if (matchLen != other.matchLen) return false; - return true; - } - - } } diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java index 6e517f731843b..610fb444e0a93 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java @@ -10,7 +10,6 @@ import java.nio.file.Path; import 
java.security.GeneralSecurityException; -import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -53,12 +52,7 @@ public record SslConfiguration( static { LinkedHashMap protocolAlgorithmMap = new LinkedHashMap<>(); - try { - SSLContext.getInstance("TLSv1.3"); - protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); - } catch (NoSuchAlgorithmException e) { - // ignore since we support JVMs using BCJSSE in FIPS mode which doesn't support TLSv1.3 - } + protocolAlgorithmMap.put("TLSv1.3", "TLSv1.3"); protocolAlgorithmMap.put("TLSv1.2", "TLSv1.2"); protocolAlgorithmMap.put("TLSv1.1", "TLSv1.1"); protocolAlgorithmMap.put("TLSv1", "TLSv1"); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/AdjacencyMatrixIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/AdjacencyMatrixIT.java index 37e782cd7c611..44e708e00d4d5 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/AdjacencyMatrixIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/AdjacencyMatrixIT.java @@ -61,19 +61,19 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numTag1Docs; i++) { numSingleTag1Docs++; XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { // randomly index the document twice so that we have deleted // docs that match the filter - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); i++) { numSingleTag2Docs++; XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).field("tag", "tag2").endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs + numTag2Docs; i < numDocs; i++) { @@ -81,17 +81,15 @@ public void setupSuiteScopeCluster() throws Exception { numTag1Docs++; numTag2Docs++; XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).array("tag", "tag1", "tag2").endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } indexRandom(true, builders); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java 
b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java index 4d64ad1030136..a6e530a9d66cf 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java @@ -12,9 +12,9 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.aggregations.AggregationsPlugin; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; @@ -80,8 +80,7 @@ public void testCancellationDuringTimeSeriesAggregation() throws Exception { BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int j = 0; j < numberOfDocsPerRefresh; j++) { bulkRequestBuilder.add( - client().prepareIndex("test") - .setOpType(DocWriteRequest.OpType.CREATE) + prepareIndex("test").setOpType(DocWriteRequest.OpType.CREATE) .setSource( "@timestamp", now + (long) i * numberOfDocsPerRefresh + j, @@ -116,7 +115,7 @@ public void testCancellationDuringTimeSeriesAggregation() throws Exception { ) .execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, searchResponse::actionGet); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsIT.java index 3f7d52c32e8df..2050ce20b1aee 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsIT.java @@ -170,7 +170,7 @@ public void setupSuiteScopeCluster() throws Exception { tsValues.put(timestamp, metrics); docSource.field("@timestamp", timestamp); docSource.endObject(); - docs.add(client().prepareIndex("index" + findIndex(timestamp)).setOpType(DocWriteRequest.OpType.CREATE).setSource(docSource)); + docs.add(prepareIndex("index" + findIndex(timestamp)).setOpType(DocWriteRequest.OpType.CREATE).setSource(docSource)); } indexRandom(true, false, docs); } @@ -503,20 +503,20 @@ public void testGetHitsFailure() throws Exception { ); client().prepareBulk() - .add(client().prepareIndex("test").setId("2").setSource("key", "bar", "val", 2, "@timestamp", "2021-01-01T00:00:10Z")) - .add(client().prepareIndex("test").setId("1").setSource("key", "bar", "val", 10, "@timestamp", "2021-01-01T00:00:00Z")) + .add(prepareIndex("test").setId("2").setSource("key", "bar", "val", 2, "@timestamp", "2021-01-01T00:00:10Z")) + .add(prepareIndex("test").setId("1").setSource("key", "bar", "val", 10, "@timestamp", "2021-01-01T00:00:00Z")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareBulk() - 
.add(client().prepareIndex("test").setId("4").setSource("key", "bar", "val", 50, "@timestamp", "2021-01-01T00:00:30Z")) - .add(client().prepareIndex("test").setId("3").setSource("key", "bar", "val", 40, "@timestamp", "2021-01-01T00:00:20Z")) + .add(prepareIndex("test").setId("4").setSource("key", "bar", "val", 50, "@timestamp", "2021-01-01T00:00:30Z")) + .add(prepareIndex("test").setId("3").setSource("key", "bar", "val", 40, "@timestamp", "2021-01-01T00:00:20Z")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareBulk() - .add(client().prepareIndex("test").setId("7").setSource("key", "foo", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) - .add(client().prepareIndex("test").setId("8").setSource("key", "foo", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) - .add(client().prepareIndex("test").setId("5").setSource("key", "baz", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) - .add(client().prepareIndex("test").setId("6").setSource("key", "baz", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) + .add(prepareIndex("test").setId("7").setSource("key", "foo", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) + .add(prepareIndex("test").setId("8").setSource("key", "foo", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) + .add(prepareIndex("test").setId("5").setSource("key", "baz", "val", 20, "@timestamp", "2021-01-01T00:00:00Z")) + .add(prepareIndex("test").setId("6").setSource("key", "baz", "val", 30, "@timestamp", "2021-01-01T00:10:00Z")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java index 7fddc65ac3e03..5c58b7f7bff5a 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java @@ -67,7 +67,7 @@ public void setup() throws Exception { final BulkRequestBuilder bulkIndexRequest = client().prepareBulk(); for (int docId = 0; docId < numberOfDocuments; docId++) { final XContentBuilder document = timeSeriesDocument(FOO_DIM_VALUE, BAR_DIM_VALUE, BAZ_DIM_VALUE, docId, timestamps::next); - bulkIndexRequest.add(client().prepareIndex("index").setOpType(DocWriteRequest.OpType.CREATE).setSource(document)); + bulkIndexRequest.add(prepareIndex("index").setOpType(DocWriteRequest.OpType.CREATE).setSource(document)); } final BulkResponse bulkIndexResponse = bulkIndexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java index 14bae46e1e00f..ce7e4c63dc69c 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java @@ -64,21 +64,20 @@ protected Collection> nodePlugins() { } private static IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { - return client().prepareIndex(idx).setSource(jsonBuilder().startObject().timeField("date", 
date).field("value", value).endObject()); + return prepareIndex(idx).setSource(jsonBuilder().startObject().timeField("date", date).field("value", value).endObject()); } private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { - return client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field("value", value) - .timeField("date", date(month, day)) - .startArray("dates") - .timeValue(date(month, day)) - .timeValue(date(month + 1, day + 1)) - .endArray() - .endObject() - ); + return prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("value", value) + .timeField("date", date(month, day)) + .startArray("dates") + .timeValue(date(month, day)) + .timeValue(date(month + 1, day + 1)) + .endArray() + .endObject() + ); } @Override @@ -90,9 +89,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } builders.addAll( diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java index e0c91689b333d..7cbb298f49931 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java @@ -142,8 +142,9 @@ public void setupSuiteScopeCluster() throws Exception { for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) { for (double value : mockBucket.docValues) { builders.add( - client().prepareIndex("idx") - .setSource(jsonBuilder().startObject().field(INTERVAL_FIELD, mockBucket.key).field(VALUE_FIELD, value).endObject()) + prepareIndex("idx").setSource( + jsonBuilder().startObject().field(INTERVAL_FIELD, mockBucket.key).field(VALUE_FIELD, value).endObject() + ) ); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index a25bbe0a6d0be..5a036a59b4bca 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -30,7 +30,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -152,14 +151,14 @@ private void setFiltersAsMap(Map filters) { } // internally we want to have a fixed order of filters, regardless of // the order of the filters in the request - Collections.sort(this.filters, Comparator.comparing(KeyedFilter::key)); + this.filters.sort(Comparator.comparing(KeyedFilter::key)); } private AdjacencyMatrixAggregationBuilder setFiltersAsList(List filters) { this.filters = new ArrayList<>(filters); // internally we want to have a fixed order of filters, regardless of // the order of the filters in the request - 
Collections.sort(this.filters, Comparator.comparing(KeyedFilter::key)); + this.filters.sort(Comparator.comparing(KeyedFilter::key)); return this; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index 84765a1432210..c17cc004e25b5 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -181,11 +180,7 @@ public InternalAggregation reduce(List aggregations, Aggreg for (InternalAggregation aggregation : aggregations) { InternalAdjacencyMatrix filters = (InternalAdjacencyMatrix) aggregation; for (InternalBucket bucket : filters.buckets) { - List sameRangeList = bucketsMap.get(bucket.key); - if (sameRangeList == null) { - sameRangeList = new ArrayList<>(aggregations.size()); - bucketsMap.put(bucket.key, sameRangeList); - } + List sameRangeList = bucketsMap.computeIfAbsent(bucket.key, k -> new ArrayList<>(aggregations.size())); sameRangeList.add(bucket); } } @@ -198,11 +193,9 @@ public InternalAggregation reduce(List aggregations, Aggreg } } reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); - Collections.sort(reducedBuckets, Comparator.comparing(InternalBucket::getKey)); + reducedBuckets.sort(Comparator.comparing(InternalBucket::getKey)); - InternalAdjacencyMatrix reduced = new InternalAdjacencyMatrix(name, reducedBuckets, getMetadata()); - - return reduced; + return new InternalAdjacencyMatrix(name, reducedBuckets, getMetadata()); } @Override diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/ParsedAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/ParsedAdjacencyMatrix.java index efd5e498c9a91..1c558db86e8eb 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/ParsedAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/ParsedAdjacencyMatrix.java @@ -48,11 +48,7 @@ public ParsedBucket getBucketByKey(String key) { ParsedAdjacencyMatrix::new ); static { - declareMultiBucketAggregationFields( - PARSER, - parser -> ParsedBucket.fromXContent(parser), - parser -> ParsedBucket.fromXContent(parser) - ); + declareMultiBucketAggregationFields(PARSER, ParsedBucket::fromXContent, ParsedBucket::fromXContent); } public static ParsedAdjacencyMatrix fromXContent(XContentParser parser, String name) throws IOException { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index dd497e8ca5478..d096012c3d634 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -173,7 +173,7 @@ public String getMinimumIntervalExpression() { public 
AutoDateHistogramAggregationBuilder setMinimumIntervalExpression(String minimumIntervalExpression) { if (minimumIntervalExpression != null && ALLOWED_INTERVALS.containsValue(minimumIntervalExpression) == false) { throw new IllegalArgumentException( - MINIMUM_INTERVAL_FIELD.getPreferredName() + " must be one of [" + ALLOWED_INTERVALS.values().toString() + "]" + MINIMUM_INTERVAL_FIELD.getPreferredName() + " must be one of [" + ALLOWED_INTERVALS.values() + "]" ); } this.minimumIntervalExpression = minimumIntervalExpression; @@ -210,9 +210,8 @@ protected ValuesSourceAggregatorFactory innerBuild( int maxRoundingInterval = Arrays.stream(roundings, 0, roundings.length - 1) .map(rounding -> rounding.innerIntervals) .flatMapToInt(Arrays::stream) - .boxed() .reduce(Integer::max) - .get(); + .getAsInt(); Settings settings = context.getIndexSettings().getNodeSettings(); int maxBuckets = MultiBucketConsumerService.MAX_BUCKET_SETTING.get(settings); int bucketCeiling = maxBuckets / maxRoundingInterval; @@ -287,7 +286,7 @@ public RoundingInfo( this.innerIntervals = innerIntervals; Objects.requireNonNull(dateTimeUnit, "dateTimeUnit cannot be null"); if (ALLOWED_INTERVALS.containsKey(dateTimeUnit) == false) { - throw new IllegalArgumentException("dateTimeUnit must be one of " + ALLOWED_INTERVALS.keySet().toString()); + throw new IllegalArgumentException("dateTimeUnit must be one of " + ALLOWED_INTERVALS.keySet()); } this.dateTimeUnit = ALLOWED_INTERVALS.get(dateTimeUnit); } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index c0b89b915229d..491ec3fe6f95d 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -479,7 +479,7 @@ private int collectValue(long owningBucketOrd, int roundingIdx, int doc, long ro /** * Increase the rounding of {@code owningBucketOrd} using - * estimated, bucket counts, {@link #rebucket() rebucketing} the all + * estimated, bucket counts, {@link FromMany#rebucket() rebucketing} the all * buckets if the estimated number of wasted buckets is too high. 
*/ private int increaseRoundingIfNeeded(long owningBucketOrd, int oldEstimatedBucketCount, long newKey, int oldRounding) { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java index f517291d96d90..be244a2c62da3 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java @@ -74,7 +74,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { private final AutoDateHistogramAggregatorSupplier aggregatorSupplier; private final int numBuckets; - private RoundingInfo[] roundingInfos; + private final RoundingInfo[] roundingInfos; public AutoDateHistogramAggregatorFactory( String name, diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 4593d6901513a..c058fb5743369 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -422,30 +422,14 @@ protected Bucket reduceBucket(List buckets, AggregationReduceContext con return new InternalAutoDateHistogram.Bucket(buckets.get(0).key, docCount, format, aggs); } - private static class BucketReduceResult { - final List buckets; - final int roundingIdx; - final long innerInterval; - final Rounding.Prepared preparedRounding; - final long min; - final long max; - - BucketReduceResult( - List buckets, - int roundingIdx, - long innerInterval, - Rounding.Prepared preparedRounding, - long min, - long max - ) { - this.buckets = buckets; - this.roundingIdx = roundingIdx; - this.innerInterval = innerInterval; - this.preparedRounding = preparedRounding; - this.min = min; - this.max = max; - } - } + private record BucketReduceResult( + List buckets, + int roundingIdx, + long innerInterval, + Rounding.Prepared preparedRounding, + long min, + long max + ) {} private BucketReduceResult addEmptyBuckets(BucketReduceResult current, AggregationReduceContext reduceContext) { List list = current.buckets; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java index 9a46d71205012..05c2e928fd84f 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java @@ -19,7 +19,7 @@ * Class to encapsulate a set of ValuesSource objects labeled by field name */ public abstract class ArrayValuesSource { - protected MultiValueMode multiValueMode; + protected final MultiValueMode multiValueMode; protected String[] names; protected VS[] values; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java index 0c44946ac96a0..2866a08e8608e 100644 
--- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java @@ -86,7 +86,7 @@ public double getCorrelation(String fieldX, String fieldY) { @Override protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); - if (counts != null && counts.isEmpty() == false) { + if (counts.isEmpty() == false) { builder.startArray(InternalMatrixStats.Fields.FIELDS); for (String fieldName : counts.keySet()) { builder.startObject(); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java index e4b8e15cd5e1b..9b8ea7321582b 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java @@ -178,10 +178,8 @@ public void add(final String[] fieldNames, final double[] fieldVals) { private void updateCovariance(final String[] fieldNames, final Map deltas) { // deep copy of hash keys (field names) ArrayList cFieldNames = new ArrayList<>(Arrays.asList(fieldNames)); - String fieldName; double dR, newVal; - for (int i = 0; i < fieldNames.length; ++i) { - fieldName = fieldNames[i]; + for (String fieldName : fieldNames) { cFieldNames.remove(fieldName); // update running covariances dR = deltas.get(fieldName); @@ -231,12 +229,12 @@ public void merge(final RunningStats other) { } else if (this.docCount == 0) { for (Map.Entry fs : other.means.entrySet()) { final String fieldName = fs.getKey(); - this.means.put(fieldName, fs.getValue().doubleValue()); - this.counts.put(fieldName, other.counts.get(fieldName).longValue()); - this.fieldSum.put(fieldName, other.fieldSum.get(fieldName).doubleValue()); - this.variances.put(fieldName, other.variances.get(fieldName).doubleValue()); - this.skewness.put(fieldName, other.skewness.get(fieldName).doubleValue()); - this.kurtosis.put(fieldName, other.kurtosis.get(fieldName).doubleValue()); + this.means.put(fieldName, fs.getValue()); + this.counts.put(fieldName, other.counts.get(fieldName)); + this.fieldSum.put(fieldName, other.fieldSum.get(fieldName)); + this.variances.put(fieldName, other.variances.get(fieldName)); + this.skewness.put(fieldName, other.skewness.get(fieldName)); + this.kurtosis.put(fieldName, other.kurtosis.get(fieldName)); if (other.covariances.containsKey(fieldName)) { this.covariances.put(fieldName, other.covariances.get(fieldName)); } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregator.java index 23abc8a328601..3b8559c77fddc 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregator.java @@ -24,9 +24,9 @@ import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue; public class BucketSelectorPipelineAggregator extends PipelineAggregator { - private GapPolicy gapPolicy; - private Script script; - private Map bucketsPathsMap; + private final GapPolicy 
gapPolicy; + private final Script script; + private final Map bucketsPathsMap; BucketSelectorPipelineAggregator( String name, diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java index ad26d8ed59438..57b60df785673 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java @@ -84,7 +84,7 @@ public class BucketSortPipelineAggregationBuilder extends AbstractPipelineAggreg private GapPolicy gapPolicy = GapPolicy.SKIP; public BucketSortPipelineAggregationBuilder(String name, List sorts) { - super(name, NAME, sorts == null ? new String[0] : sorts.stream().map(s -> s.getFieldName()).toArray(String[]::new)); + super(name, NAME, sorts == null ? new String[0] : sorts.stream().map(FieldSortBuilder::getFieldName).toArray(String[]::new)); this.sorts = sorts == null ? Collections.emptyList() : sorts; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 35d5a97aa854f..1132507d520f4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -69,7 +69,7 @@ public class MovFnPipelineAggregationBuilder extends AbstractPipelineAggregation } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, GAP_POLICY, ObjectParser.ValueType.STRING); - }; + } public MovFnPipelineAggregationBuilder(String name, String bucketsPath, Script script, int window) { super(name, NAME, new String[] { bucketsPath }); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregator.java index 7431e806d96e4..c9debf89e8162 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregator.java @@ -50,7 +50,6 @@ public class MovFnPipelineAggregator extends PipelineAggregator { private final DocValueFormat formatter; private final BucketHelpers.GapPolicy gapPolicy; private final Script script; - private final String bucketsPath; private final int window; private final int shift; @@ -65,7 +64,6 @@ public class MovFnPipelineAggregator extends PipelineAggregator { Map metadata ) { super(name, new String[] { bucketsPath }, metadata); - this.bucketsPath = bucketsPath; this.script = script; this.formatter = formatter; this.gapPolicy = gapPolicy; @@ -136,9 +134,6 @@ private static int clamp(int index, List list) { if (index < 0) { return 0; } - if (index > list.size()) { - return list.size(); - } - return index; + return Math.min(index, list.size()); } } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java 
b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java index f8cf953c4caf3..22877c5bbc32b 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java @@ -67,8 +67,7 @@ public void setUp() throws Exception { @Override protected InternalAdjacencyMatrix createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final List buckets = new ArrayList<>(); - for (int i = 0; i < keys.size(); ++i) { - String key = keys.get(i); + for (String key : keys) { int docCount = randomIntBetween(0, 1000); buckets.add(new InternalAdjacencyMatrix.InternalBucket(key, docCount, aggregations)); } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java index 8a3a061750a87..880d223442e29 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java @@ -183,7 +183,7 @@ public void testMultiBucketAggregationAsSubAggregation() throws IOException { public void testAggregationSize() throws IOException { CheckedConsumer buildIndex = multiTsWriter(); - List> verifiers = new ArrayList>(); + List> verifiers = new ArrayList<>(); verifiers.add(ts -> assertThat(ts.getBucketByKey("{dim1=aaa, dim2=xxx}").docCount, equalTo(2L))); verifiers.add(ts -> assertThat(ts.getBucketByKey("{dim1=aaa, dim2=yyy}").docCount, equalTo(2L))); diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java index 6a43f02697e26..7275cd26cae65 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java @@ -22,12 +22,12 @@ class MultiPassStats { private final String fieldBKey; private long count; - private Map means = new HashMap<>(); - private Map variances = new HashMap<>(); - private Map skewness = new HashMap<>(); - private Map kurtosis = new HashMap<>(); - private Map> covariances = new HashMap<>(); - private Map> correlations = new HashMap<>(); + private final Map means = new HashMap<>(); + private final Map variances = new HashMap<>(); + private final Map skewness = new HashMap<>(); + private final Map kurtosis = new HashMap<>(); + private final Map> covariances = new HashMap<>(); + private final Map> correlations = new HashMap<>(); MultiPassStats(String fieldAName, String fieldBName) { this.fieldAKey = fieldAName; diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilderTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilderTests.java index bb67c8da7eca4..a025f03d0eafc 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilderTests.java +++ 
b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilderTests.java @@ -40,7 +40,7 @@ protected BucketSortPipelineAggregationBuilder createTestAggregatorFactory() { sorts.add(fieldSortBuilder); } BucketSortPipelineAggregationBuilder factory = new BucketSortPipelineAggregationBuilder(randomAlphaOfLengthBetween(3, 20), sorts); - Integer from = randomIntBetween(0, 20); + int from = randomIntBetween(0, 20); Integer size = randomBoolean() ? randomIntBetween(1, 1000) : null; if (randomBoolean()) { factory.from(from); diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeAggregatorTests.java index 70eb63c5e61da..68245d31c14b6 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/pipeline/DerivativeAggregatorTests.java @@ -218,7 +218,7 @@ public void testSingleValueAggDerivative() throws IOException { Object[] propertiesDocCounts = (Object[]) histogram.getProperty("_count"); Object[] propertiesSumCounts = (Object[]) histogram.getProperty("sum.value"); - Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets + long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets // overwritten for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -270,7 +270,7 @@ public void testMultiValueAggDerivative() throws IOException { Object[] propertiesDocCounts = (Object[]) histogram.getProperty("_count"); Object[] propertiesSumCounts = (Object[]) histogram.getProperty("stats.sum"); - Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets + long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets // overwritten for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -670,8 +670,8 @@ public void testDerivDerivNPE() throws IOException { } } - private Long getTotalDocCountAcrossBuckets(List buckets) { - Long count = 0L; + private long getTotalDocCountAcrossBuckets(List buckets) { + long count = 0L; for (Histogram.Bucket bucket : buckets) { count += bucket.getDocCount(); } diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml index 4d4848e8aebc3..5cf0265374b08 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/top_hits.yml @@ -600,10 +600,13 @@ synthetic _source: - do: bulk: index: test2 + refresh: true body: - { index: { } } - { gender: 3 } - do: + # The script can't process a bucket without a salary value for gender '3'. + catch: /path not supported for \[top_salary_hits\]:\ \[_source.salary\]./ search: index: test2 size: 0 @@ -630,13 +633,6 @@ synthetic _source: ts: top_salary_hits[_source.salary] script: "params.ts < 8000" - # Empty bucket for gender '3' affects nothing. 
- - length: { aggregations.genders.buckets: 1} - - match: { aggregations.genders.buckets.0.top_salary_hits.hits.total.value: 4} - - match: { aggregations.genders.buckets.0.top_salary_hits.hits.hits.0._source.gender: 1} - - match: { aggregations.genders.buckets.0.top_salary_hits.hits.hits.0._source.salary: 4000} - - match: { aggregations.genders.buckets.0.top_salary_hits.hits.hits.0._source.birth_date: 1982} - - do: catch: /path not supported for \[top_salary_hits\]:\ \[_source.nosuchfield\]./ search: diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java index 32e20aea3c2e1..bb450f1cc43ee 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/QueryStringWithAnalyzersIT.java @@ -49,7 +49,7 @@ public void testCustomWordDelimiterQueryString() { .setMapping("field1", "type=text,analyzer=my_analyzer", "field2", "type=text,analyzer=my_analyzer") ); - client().prepareIndex("test").setId("1").setSource("field1", "foo bar baz", "field2", "not needed").get(); + prepareIndex("test").setId("1").setSource("field1", "foo bar baz", "field2", "not needed").get(); refresh(); assertHitCount( diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadAnalyzerTests.java b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadAnalyzerTests.java index da8b431b4eda3..2ef1a7639e597 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadAnalyzerTests.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadAnalyzerTests.java @@ -91,8 +91,8 @@ private Path setupSynonyms() throws IOException { .setMapping("field", "type=text,analyzer=standard,search_analyzer=" + SYNONYM_ANALYZER_NAME) ); - client().prepareIndex(INDEX_NAME).setId("1").setSource("field", "Foo").get(); - assertNoFailures(indicesAdmin().prepareRefresh(INDEX_NAME).execute().actionGet()); + prepareIndex(INDEX_NAME).setId("1").setSource("field", "Foo").get(); + assertNoFailures(indicesAdmin().prepareRefresh(INDEX_NAME).get()); assertHitCount(client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchQuery("field", "baz")), 1L); assertHitCount(client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchQuery("field", "buzz")), 0L); @@ -161,8 +161,8 @@ public void testSynonymsInMultiplexerUpdateable() throws FileNotFoundException, .setMapping("field", "type=text,analyzer=standard,search_analyzer=" + SYNONYM_ANALYZER_NAME) ); - client().prepareIndex(INDEX_NAME).setId("1").setSource("field", "foo").get(); - assertNoFailures(indicesAdmin().prepareRefresh(INDEX_NAME).execute().actionGet()); + prepareIndex(INDEX_NAME).setId("1").setSource("field", "foo").get(); + assertNoFailures(indicesAdmin().prepareRefresh(INDEX_NAME).get()); assertHitCount(client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchQuery("field", "baz")), 1L); assertHitCount(client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchQuery("field", "buzz")), 0L); diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java 
b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java index f0063f663142d..d55dbd0f1d783 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java @@ -70,8 +70,8 @@ private void testSynonymsUpdate(boolean preview) throws FileNotFoundException, I .setMapping("field", "type=text,analyzer=standard,search_analyzer=my_synonym_analyzer") ); - client().prepareIndex("test").setId("1").setSource("field", "foo").get(); - assertNoFailures(indicesAdmin().prepareRefresh("test").execute().actionGet()); + prepareIndex("test").setId("1").setSource("field", "foo").get(); + assertNoFailures(indicesAdmin().prepareRefresh("test").get()); assertHitCount(prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")), 1L); assertHitCount(prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")), 0L); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 90a8d3379775f..35face57b8294 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -91,6 +91,7 @@ import org.apache.lucene.analysis.ru.RussianAnalyzer; import org.apache.lucene.analysis.shingle.ShingleFilter; import org.apache.lucene.analysis.snowball.SnowballFilter; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; @@ -197,6 +198,7 @@ public Map>> getAn analyzers.put("portuguese", PortugueseAnalyzerProvider::new); analyzers.put("romanian", RomanianAnalyzerProvider::new); analyzers.put("russian", RussianAnalyzerProvider::new); + analyzers.put("serbian", SerbianAnalyzerProvider::new); analyzers.put("sorani", SoraniAnalyzerProvider::new); analyzers.put("spanish", SpanishAnalyzerProvider::new); analyzers.put("swedish", SwedishAnalyzerProvider::new); @@ -447,6 +449,7 @@ public List getPreBuiltAnalyzerProviderFactorie analyzers.add(new PreBuiltAnalyzerProviderFactory("portuguese", CachingStrategy.LUCENE, PortugueseAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("romanian", CachingStrategy.LUCENE, RomanianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("russian", CachingStrategy.LUCENE, RussianAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("serbian", CachingStrategy.LUCENE, SerbianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("sorani", CachingStrategy.LUCENE, SoraniAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("spanish", CachingStrategy.LUCENE, SpanishAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("swedish", CachingStrategy.LUCENE, SwedishAnalyzer::new)); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java new file mode 100644 index 0000000000000..567502b75bced --- /dev/null +++ 
b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.CharArraySet; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; + +public class SerbianAnalyzerProvider extends AbstractIndexAnalyzerProvider { + + private final SerbianAnalyzer analyzer; + + SerbianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(name, settings); + analyzer = new SerbianAnalyzer( + Analysis.parseStopWords(env, settings, SerbianAnalyzer.getDefaultStopSet()), + Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET) + ); + } + + @Override + public SerbianAnalyzer get() { + return this.analyzer; + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java index 8f9a882e29d2a..7385987567fb0 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java @@ -70,6 +70,7 @@ import org.tartarus.snowball.ext.PortugueseStemmer; import org.tartarus.snowball.ext.RomanianStemmer; import org.tartarus.snowball.ext.RussianStemmer; +import org.tartarus.snowball.ext.SerbianStemmer; import org.tartarus.snowball.ext.SpanishStemmer; import org.tartarus.snowball.ext.SwedishStemmer; import org.tartarus.snowball.ext.TurkishStemmer; @@ -237,6 +238,9 @@ public TokenStream create(TokenStream tokenStream) { } else if ("light_russian".equalsIgnoreCase(language) || "lightRussian".equalsIgnoreCase(language)) { return new RussianLightStemFilter(tokenStream); + } else if ("serbian".equalsIgnoreCase(language)) { + return new SnowballFilter(tokenStream, new SerbianStemmer()); + // Spanish stemmers } else if ("spanish".equalsIgnoreCase(language)) { return new SnowballFilter(tokenStream, new SpanishStemmer()); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java index e7e9aa32b1684..2693245ac2757 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -8,13 +8,10 @@ package org.elasticsearch.analysis.common; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.Operator; 
import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,9 +27,9 @@ import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight; -import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHighlight; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -112,12 +109,20 @@ public void testNgramHighlightingWithBrokenPositions() throws IOException { .putList("analysis.analyzer.search_autocomplete.filter", "lowercase", "wordDelimiter") ) ); - client().prepareIndex("test").setId("1").setSource("name", "ARCOTEL Hotels Deutschland").get(); + prepareIndex("test").setId("1").setSource("name", "ARCOTEL Hotels Deutschland").get(); refresh(); - SearchResponse search = prepareSearch("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)) - .highlighter(new HighlightBuilder().field("name.autocomplete")) - .get(); - assertHighlight(search, 0, "name.autocomplete", 0, equalTo("ARCOTEL Hotels Deutschland")); + assertResponse( + prepareSearch("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)) + .highlighter(new HighlightBuilder().field("name.autocomplete")), + response -> assertHighlight( + response, + 0, + "name.autocomplete", + 0, + equalTo("ARCOTEL Hotels Deutschland") + ) + ); + } public void testMultiPhraseCutoff() throws IOException { @@ -146,8 +151,7 @@ public void testMultiPhraseCutoff() throws IOException { ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( "body", "Test: http://www.facebook.com http://elasticsearch.org " @@ -158,30 +162,34 @@ public void testMultiPhraseCutoff() throws IOException { ) .get(); refresh(); - SearchResponse search = prepareSearch().setQuery(matchPhraseQuery("body", "Test: http://www.facebook.com ")) - .highlighter(new HighlightBuilder().field("body").highlighterType("fvh")) - .get(); - assertHighlight(search, 0, "body", 0, startsWith("Test: http://www.facebook.com")); - search = prepareSearch().setQuery( - matchPhraseQuery( + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("body", "Test: http://www.facebook.com ")) + .highlighter(new HighlightBuilder().field("body").highlighterType("fvh")), + response -> assertHighlight(response, 0, "body", 0, startsWith("Test: http://www.facebook.com")) + ); + + assertResponse( + prepareSearch().setQuery( + matchPhraseQuery( + "body", + "Test: http://www.facebook.com " + + "http://elasticsearch.org http://xing.com http://cnn.com " + + "http://quora.com http://twitter.com this is a test for highlighting " + + "feature Test: http://www.facebook.com http://elasticsearch.org " + + "http://xing.com http://cnn.com http://quora.com http://twitter.com this " + + "is a test for highlighting feature" + ) + ).highlighter(new 
HighlightBuilder().field("body").highlighterType("fvh")), + response -> assertHighlight( + response, + 0, "body", - "Test: http://www.facebook.com " - + "http://elasticsearch.org http://xing.com http://cnn.com " - + "http://quora.com http://twitter.com this is a test for highlighting " - + "feature Test: http://www.facebook.com http://elasticsearch.org " - + "http://xing.com http://cnn.com http://quora.com http://twitter.com this " - + "is a test for highlighting feature" - ) - ).highlighter(new HighlightBuilder().field("body").highlighterType("fvh")).execute().actionGet(); - assertHighlight( - search, - 0, - "body", - 0, - equalTo( - "Test: " - + "http://www.facebook.com http://elasticsearch.org " - + "http://xing.com http://cnn.com http://quora.com" + 0, + equalTo( + "Test: " + + "http://www.facebook.com http://elasticsearch.org " + + "http://xing.com http://cnn.com http://quora.com" + ) ) ); } @@ -203,19 +211,28 @@ public void testSynonyms() throws IOException { ); ensureGreen(); - client().prepareIndex("test").setId("0").setSource("field1", "The quick brown fox jumps over the lazy dog").get(); + prepareIndex("test").setId("0").setSource("field1", "The quick brown fox jumps over the lazy dog").get(); refresh(); for (String highlighterType : new String[] { "plain", "fvh", "unified" }) { logger.info("--> highlighting (type=" + highlighterType + ") and searching on field1"); - SearchSourceBuilder source = searchSource().query(matchQuery("field1", "quick brown fox").operator(Operator.AND)) - .highlighter(highlight().field("field1").order("score").preTags("").postTags("").highlighterType(highlighterType)); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - - source = searchSource().query(matchQuery("field1", "fast brown fox").operator(Operator.AND)) - .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + assertResponse( + prepareSearch("test").setQuery(matchQuery("field1", "quick brown fox").operator(Operator.AND)) + .highlighter( + highlight().field("field1").order("score").preTags("").postTags("").highlighterType(highlighterType) + ), + resp -> { + assertHighlight(resp, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } + ); + assertResponse( + prepareSearch("test").setQuery(matchQuery("field1", "fast brown fox").operator(Operator.AND)) + .highlighter( + highlight().field("field1").order("score").preTags("").postTags("").highlighterType(highlighterType) + ), + resp -> { + assertHighlight(resp, 0, "field1", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } + ); } } @@ -231,78 +248,73 @@ public void testPhrasePrefix() throws IOException { ensureGreen(); - client().prepareIndex("first_test_index") - .setId("0") + prepareIndex("first_test_index").setId("0") .setSource("field0", "The quick brown fox jumps over the lazy dog", "field1", "The quick brown fox jumps over the lazy dog") .get(); - client().prepareIndex("first_test_index") - .setId("1") - .setSource("field1", "The quick browse button is a fancy thing, right bro?") - .get(); + prepareIndex("first_test_index").setId("1").setSource("field1", "The quick browse button is a fancy thing, right bro?").get(); 
refresh(); logger.info("--> highlighting and searching on field0"); - SearchSourceBuilder source = searchSource().query(matchPhrasePrefixQuery("field0", "bro")) - .highlighter(highlight().field("field0").order("score").preTags("").postTags("")); - SearchResponse searchResponse = client().search(new SearchRequest("first_test_index").source(source)).actionGet(); - - assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - - source = searchSource().query(matchPhrasePrefixQuery("field0", "quick bro")) - .highlighter(highlight().field("field0").order("score").preTags("").postTags("")); + assertResponse( + prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field0", "bro")) + .highlighter(highlight().field("field0").order("score").preTags("").postTags("")), + resp -> { + assertHighlight(resp, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } + ); - searchResponse = client().search(new SearchRequest("first_test_index").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + assertResponse( + prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field0", "quick bro")) + .highlighter(highlight().field("field0").order("score").preTags("").postTags("")), + resp -> { + assertHighlight(resp, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } + ); logger.info("--> highlighting and searching on field1"); - source = searchSource().query( - boolQuery().should(matchPhrasePrefixQuery("field1", "test")).should(matchPhrasePrefixQuery("field1", "bro")) - ).highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - - searchResponse = client().search(new SearchRequest("first_test_index").source(source)).actionGet(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - for (int i = 0; i < 2; i++) { - assertHighlight( - searchResponse, - i, - "field1", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) - ); - } - - source = searchSource().query(matchPhrasePrefixQuery("field1", "quick bro")) - .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - - searchResponse = client().search(new SearchRequest("first_test_index").source(source)).actionGet(); - - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) + assertResponse( + prepareSearch("first_test_index").setQuery( + boolQuery().should(matchPhrasePrefixQuery("field1", "test")).should(matchPhrasePrefixQuery("field1", "bro")) + ).highlighter(highlight().field("field1").order("score").preTags("").postTags("")), + resp -> { + assertThat(resp.getHits().getTotalHits().value, equalTo(2L)); + for (int i = 0; i < 2; i++) { + assertHighlight( + resp, + i, + "field1", + 0, + 1, + anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog") + ) + ); + } + } ); - assertHighlight( - searchResponse, - 1, - "field1", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) + + assertResponse( + prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field1", "quick bro")) + 
.highlighter(highlight().field("field1").order("score").preTags("").postTags("")), + resp -> { + for (int i = 0; i < 2; i++) { + assertHighlight( + resp, + i, + "field1", + 0, + 1, + anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog") + ) + ); + } + } ); + // with synonyms assertAcked( prepareCreate("second_test_index").setSettings(builder.build()) .setMapping( @@ -312,9 +324,7 @@ public void testPhrasePrefix() throws IOException { "type=text,analyzer=synonym" ) ); - // with synonyms - client().prepareIndex("second_test_index") - .setId("0") + prepareIndex("second_test_index").setId("0") .setSource( "type", "type2", @@ -324,63 +334,55 @@ public void testPhrasePrefix() throws IOException { "The quick brown fox jumps over the lazy dog" ) .get(); - client().prepareIndex("second_test_index") - .setId("1") + prepareIndex("second_test_index").setId("1") .setSource("type", "type2", "field4", "The quick browse button is a fancy thing, right bro?") .get(); - client().prepareIndex("second_test_index").setId("2").setSource("type", "type2", "field4", "a quick fast blue car").get(); + prepareIndex("second_test_index").setId("2").setSource("type", "type2", "field4", "a quick fast blue car").get(); refresh(); - source = searchSource().postFilter(termQuery("type", "type2")) - .query(matchPhrasePrefixQuery("field3", "fast bro")) - .highlighter(highlight().field("field3").order("score").preTags("").postTags("")); - - searchResponse = client().search(new SearchRequest("second_test_index").source(source)).actionGet(); - - assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - - logger.info("--> highlighting and searching on field4"); - source = searchSource().postFilter(termQuery("type", "type2")) - .query(matchPhrasePrefixQuery("field4", "the fast bro")) - .highlighter(highlight().field("field4").order("score").preTags("").postTags("")); - searchResponse = client().search(new SearchRequest("second_test_index").source(source)).actionGet(); - - assertHighlight( - searchResponse, - 0, - "field4", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) + assertResponse( + prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field3", "fast bro")) + .highlighter(highlight().field("field3").order("score").preTags("").postTags("")), + resp -> { + assertHighlight(resp, 0, "field3", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); + } ); - assertHighlight( - searchResponse, - 1, - "field4", - 0, - 1, - anyOf( - equalTo("The quick browse button is a fancy thing, right bro?"), - equalTo("The quick brown fox jumps over the lazy dog") - ) + + assertResponse( + prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field4", "the fast bro")) + .highlighter(highlight().field("field4").order("score").preTags("").postTags("")), + resp -> { + for (int i = 0; i < 2; i++) { + assertHighlight( + resp, + i, + "field4", + 0, + 1, + anyOf( + equalTo("The quick browse button is a fancy thing, right bro?"), + equalTo("The quick brown fox jumps over the lazy dog") + ) + ); + } + } ); logger.info("--> highlighting and searching on field4"); - source = searchSource().postFilter(termQuery("type", "type2")) - .query(matchPhrasePrefixQuery("field4", "a fast quick blue ca")) - .highlighter(highlight().field("field4").order("score").preTags("").postTags("")); - searchResponse = 
client().search(new SearchRequest("second_test_index").source(source)).actionGet(); - - assertHighlight( - searchResponse, - 0, - "field4", - 0, - 1, - anyOf(equalTo("a quick fast blue car"), equalTo("a quick fast blue car")) + assertResponse( + prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field4", "a fast quick blue ca")) + .setPostFilter(termQuery("type", "type2")) + .highlighter(highlight().field("field4").order("score").preTags("").postTags("")), + resp -> { + assertHighlight( + resp, + 0, + "field4", + 0, + 1, + anyOf(equalTo("a quick fast blue car"), equalTo("a quick fast blue car")) + ); + } ); } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml index dcec02729a44e..c03bdb3111050 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/20_analyzers.yml @@ -988,6 +988,35 @@ - length: { tokens: 1 } - match: { tokens.0.token: вмест } +--- +"serbian": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: serbian + + - do: + indices.analyze: + body: + text: будите шампиони + analyzer: serbian + - length: { tokens: 1 } + - match: { tokens.0.token: sampion } + + - do: + indices.analyze: + index: test + body: + text: будите шампиони + analyzer: my_analyzer + - length: { tokens: 1 } + - match: { tokens.0.token: sampion } + --- "sorani": - do: diff --git a/modules/apm/build.gradle b/modules/apm/build.gradle index c9002a71bf746..13f1ac4a4cd3e 100644 --- a/modules/apm/build.gradle +++ b/modules/apm/build.gradle @@ -18,7 +18,7 @@ dependencies { implementation "io.opentelemetry:opentelemetry-api:${otelVersion}" implementation "io.opentelemetry:opentelemetry-context:${otelVersion}" implementation "io.opentelemetry:opentelemetry-semconv:${otelVersion}-alpha" - runtimeOnly "co.elastic.apm:elastic-apm-agent:1.43.0" + runtimeOnly "co.elastic.apm:elastic-apm-agent:1.44.0" } tasks.named("dependencyLicenses").configure { diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java index 07bbc5c55f7cd..51f008db646fa 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java @@ -12,19 +12,23 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.telemetry.apm.internal.metrics.DoubleAsyncCounterAdapter; import org.elasticsearch.telemetry.apm.internal.metrics.DoubleCounterAdapter; import org.elasticsearch.telemetry.apm.internal.metrics.DoubleGaugeAdapter; import org.elasticsearch.telemetry.apm.internal.metrics.DoubleHistogramAdapter; import org.elasticsearch.telemetry.apm.internal.metrics.DoubleUpDownCounterAdapter; +import org.elasticsearch.telemetry.apm.internal.metrics.LongAsyncCounterAdapter; import org.elasticsearch.telemetry.apm.internal.metrics.LongCounterAdapter; import org.elasticsearch.telemetry.apm.internal.metrics.LongGaugeAdapter; import org.elasticsearch.telemetry.apm.internal.metrics.LongHistogramAdapter; import 
org.elasticsearch.telemetry.apm.internal.metrics.LongUpDownCounterAdapter; +import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; import org.elasticsearch.telemetry.metric.DoubleCounter; import org.elasticsearch.telemetry.metric.DoubleGauge; import org.elasticsearch.telemetry.metric.DoubleHistogram; import org.elasticsearch.telemetry.metric.DoubleUpDownCounter; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongAsyncCounter; import org.elasticsearch.telemetry.metric.LongCounter; import org.elasticsearch.telemetry.metric.LongGauge; import org.elasticsearch.telemetry.metric.LongHistogram; @@ -48,6 +52,8 @@ public class APMMeterRegistry implements MeterRegistry { private final Registrar doubleGauges = new Registrar<>(); private final Registrar doubleHistograms = new Registrar<>(); private final Registrar longCounters = new Registrar<>(); + private final Registrar longAsynchronousCounters = new Registrar<>(); + private final Registrar doubleAsynchronousCounters = new Registrar<>(); private final Registrar longUpDownCounters = new Registrar<>(); private final Registrar longGauges = new Registrar<>(); private final Registrar longHistograms = new Registrar<>(); @@ -127,6 +133,35 @@ public LongCounter registerLongCounter(String name, String description, String u } } + @Override + public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier observer) { + try (ReleasableLock lock = registerLock.acquire()) { + return longAsynchronousCounters.register(new LongAsyncCounterAdapter(meter, name, description, unit, observer)); + } + } + + @Override + public LongAsyncCounter getLongAsyncCounter(String name) { + return longAsynchronousCounters.get(name); + } + + @Override + public DoubleAsyncCounter registerDoubleAsyncCounter( + String name, + String description, + String unit, + Supplier observer + ) { + try (ReleasableLock lock = registerLock.acquire()) { + return doubleAsynchronousCounters.register(new DoubleAsyncCounterAdapter(meter, name, description, unit, observer)); + } + } + + @Override + public DoubleAsyncCounter getDoubleAsyncCounter(String name) { + return doubleAsynchronousCounters.get(name); + } + @Override public LongCounter getLongCounter(String name) { return longCounters.get(name); diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java index 2a806ca19a4e0..bbeaba0f6f088 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java @@ -25,7 +25,7 @@ * @param delegated instrument */ public abstract class AbstractInstrument implements Instrument { - private static final int MAX_NAME_LENGTH = 63; // TODO(stu): change to 255 when we upgrade to otel 1.30+, see #101679 + private static final int MAX_NAME_LENGTH = 255; private final AtomicReference delegate; private final String name; private final String description; diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java new file mode 100644 index 0000000000000..a1ea9f33e31fb --- /dev/null +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java @@ -0,0 +1,57 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.telemetry.apm.internal.metrics; + +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.ObservableDoubleCounter; + +import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.telemetry.apm.AbstractInstrument; +import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; + +import java.util.Objects; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; + +public class DoubleAsyncCounterAdapter extends AbstractInstrument implements DoubleAsyncCounter { + private final Supplier observer; + private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); + private boolean closed = false; + + public DoubleAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier observer) { + super(meter, name, description, unit); + this.observer = observer; + } + + @Override + protected ObservableDoubleCounter buildInstrument(Meter meter) { + var builder = Objects.requireNonNull(meter).counterBuilder(getName()); + return builder.setDescription(getDescription()).setUnit(getUnit()).ofDoubles().buildWithCallback(measurement -> { + DoubleWithAttributes observation; + try { + observation = observer.get(); + } catch (RuntimeException err) { + assert false : "observer must not throw [" + err.getMessage() + "]"; + return; + } + measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + }); + } + + @Override + public void close() throws Exception { + try (ReleasableLock lock = closedLock.acquire()) { + if (closed == false) { + getInstrument().close(); + } + closed = true; + } + } +} diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java new file mode 100644 index 0000000000000..126cca1964283 --- /dev/null +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.telemetry.apm.internal.metrics; + +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.ObservableLongCounter; + +import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.telemetry.apm.AbstractInstrument; +import org.elasticsearch.telemetry.metric.LongAsyncCounter; +import org.elasticsearch.telemetry.metric.LongWithAttributes; + +import java.util.Objects; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; + +public class LongAsyncCounterAdapter extends AbstractInstrument implements LongAsyncCounter { + private final Supplier observer; + private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); + private boolean closed = false; + + public LongAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier observer) { + super(meter, name, description, unit); + this.observer = observer; + } + + @Override + protected ObservableLongCounter buildInstrument(Meter meter) { + var builder = Objects.requireNonNull(meter).counterBuilder(getName()); + return builder.setDescription(getDescription()).setUnit(getUnit()).buildWithCallback(measurement -> { + LongWithAttributes observation; + try { + observation = observer.get(); + } catch (RuntimeException err) { + assert false : "observer must not throw [" + err.getMessage() + "]"; + return; + } + measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + }); + } + + @Override + public void close() throws Exception { + try (ReleasableLock lock = closedLock.acquire()) { + if (closed == false) { + getInstrument().close(); + } + closed = true; + } + } +} diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java index b393edd6e58e3..82dd911d1b821 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java @@ -88,13 +88,13 @@ public void testNoopIsSetOnStop() { public void testMaxNameLength() { APMMeterService apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel); apmMeter.start(); - int max_length = 63; + int max_length = 255; var counter = apmMeter.getMeterRegistry().registerLongCounter("a".repeat(max_length), "desc", "count"); assertThat(counter, instanceOf(LongCounter.class)); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, () -> apmMeter.getMeterRegistry().registerLongCounter("a".repeat(max_length + 1), "desc", "count") ); - assertThat(iae.getMessage(), containsString("exceeds maximum length [63]")); + assertThat(iae.getMessage(), containsString("exceeds maximum length [255]")); } } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java index 6661653499f63..94627c1e53813 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java @@ -110,8 +110,10 @@ public LongCounter build() { @Override public ObservableLongCounter buildWithCallback(Consumer callback) { - unimplemented(); - return null; + LongAsyncCounterRecorder longAsyncCounter = new LongAsyncCounterRecorder(name, callback); + 
recorder.register(longAsyncCounter, longAsyncCounter.getInstrument(), name, description, unit); + callbacks.add(longAsyncCounter); + return longAsyncCounter; } @Override @@ -121,6 +123,24 @@ public ObservableLongMeasurement buildObserver() { } } + private class LongAsyncCounterRecorder extends AbstractInstrument implements ObservableLongCounter, Callback, OtelInstrument { + final Consumer callback; + + LongAsyncCounterRecorder(String name, Consumer callback) { + super(name, InstrumentType.LONG_ASYNC_COUNTER); + this.callback = callback; + } + + @Override + public void close() { + callbacks.remove(this); + } + + public void doCall() { + callback.accept(new LongMeasurementRecorder(name, instrument)); + } + } + private class LongRecorder extends LongUpDownRecorder implements LongCounter, OtelInstrument { LongRecorder(String name) { super(name, InstrumentType.LONG_COUNTER); @@ -172,8 +192,10 @@ public DoubleCounter build() { @Override public ObservableDoubleCounter buildWithCallback(Consumer callback) { - unimplemented(); - return null; + DoubleAsyncCounterRecorder doubleAsyncCounter = new DoubleAsyncCounterRecorder(name, callback); + recorder.register(doubleAsyncCounter, doubleAsyncCounter.getInstrument(), name, description, unit); + callbacks.add(doubleAsyncCounter); + return doubleAsyncCounter; } @Override @@ -183,6 +205,24 @@ public ObservableDoubleMeasurement buildObserver() { } } + private class DoubleAsyncCounterRecorder extends AbstractInstrument implements ObservableDoubleCounter, Callback, OtelInstrument { + final Consumer callback; + + DoubleAsyncCounterRecorder(String name, Consumer callback) { + super(name, InstrumentType.DOUBLE_ASYNC_COUNTER); + this.callback = callback; + } + + @Override + public void close() { + callbacks.remove(this); + } + + public void doCall() { + callback.accept(new DoubleMeasurementRecorder(name, instrument)); + } + } + private class DoubleRecorder extends DoubleUpDownRecorder implements DoubleCounter, OtelInstrument { DoubleRecorder(String name) { super(name, InstrumentType.DOUBLE_COUNTER); diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java new file mode 100644 index 0000000000000..fa8706deee870 --- /dev/null +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.telemetry.apm.internal.metrics; + +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.apm.APMMeterRegistry; +import org.elasticsearch.telemetry.apm.RecordingOtelMeter; +import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongAsyncCounter; +import org.elasticsearch.telemetry.metric.LongWithAttributes; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class AsyncCountersAdapterTests extends ESTestCase { + RecordingOtelMeter otelMeter; + APMMeterRegistry registry; + + @Before + public void init() { + otelMeter = new RecordingOtelMeter(); + registry = new APMMeterRegistry(otelMeter); + } + + // testing that a value reported is then used in a callback + public void testLongAsyncCounter() throws Exception { + AtomicReference attrs = new AtomicReference<>(); + LongAsyncCounter longAsyncCounter = registry.registerLongAsyncCounter("name", "desc", "unit", attrs::get); + + attrs.set(new LongWithAttributes(1L, Map.of("k", 1L))); + + otelMeter.collectMetrics(); + + List metrics = otelMeter.getRecorder().getMeasurements(longAsyncCounter); + assertThat(metrics, hasSize(1)); + assertThat(metrics.get(0).attributes(), equalTo(Map.of("k", 1L))); + assertThat(metrics.get(0).getLong(), equalTo(1L)); + + attrs.set(new LongWithAttributes(2L, Map.of("k", 5L))); + + otelMeter.getRecorder().resetCalls(); + otelMeter.collectMetrics(); + + metrics = otelMeter.getRecorder().getMeasurements(longAsyncCounter); + assertThat(metrics, hasSize(1)); + assertThat(metrics.get(0).attributes(), equalTo(Map.of("k", 5L))); + assertThat(metrics.get(0).getLong(), equalTo(2L)); + + longAsyncCounter.close(); + + otelMeter.getRecorder().resetCalls(); + otelMeter.collectMetrics(); + + metrics = otelMeter.getRecorder().getMeasurements(longAsyncCounter); + assertThat(metrics, hasSize(0)); + } + + public void testDoubleAsyncAdapter() throws Exception { + AtomicReference attrs = new AtomicReference<>(); + DoubleAsyncCounter doubleAsyncCounter = registry.registerDoubleAsyncCounter("name", "desc", "unit", attrs::get); + + attrs.set(new DoubleWithAttributes(1.0, Map.of("k", 1.0))); + + otelMeter.collectMetrics(); + + List metrics = otelMeter.getRecorder().getMeasurements(doubleAsyncCounter); + assertThat(metrics, hasSize(1)); + assertThat(metrics.get(0).attributes(), equalTo(Map.of("k", 1.0))); + assertThat(metrics.get(0).getDouble(), equalTo(1.0)); + + attrs.set(new DoubleWithAttributes(2.0, Map.of("k", 5.0))); + + otelMeter.getRecorder().resetCalls(); + otelMeter.collectMetrics(); + + metrics = otelMeter.getRecorder().getMeasurements(doubleAsyncCounter); + assertThat(metrics, hasSize(1)); + assertThat(metrics.get(0).attributes(), equalTo(Map.of("k", 5.0))); + assertThat(metrics.get(0).getDouble(), equalTo(2.0)); + + doubleAsyncCounter.close(); + + otelMeter.getRecorder().resetCalls(); + otelMeter.collectMetrics(); + + metrics = otelMeter.getRecorder().getMeasurements(doubleAsyncCounter); + assertThat(metrics, hasSize(0)); + } +} diff --git a/modules/build.gradle b/modules/build.gradle index ad7049a9905f0..7707b60b38b25 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -8,7 +8,7 @@ configure(subprojects.findAll { 
it.parent.path == project.path }) { group = 'org.elasticsearch.plugin' // for modules which publish client jars - apply plugin: 'elasticsearch.internal-testclusters' + // apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-test-artifact' diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 90c76d630f0d0..61b53ea10a786 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -49,10 +49,8 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.MultiSearchRequestBuilder; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -86,7 +84,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.FieldAndFormat; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -122,6 +119,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.arrayWithSize; @@ -436,16 +434,12 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except }"""; PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id_1"); request.indexTemplate( - new ComposableIndexTemplate( - List.of(dataStreamName), // use no wildcard, so that backing indices don't match just by name - new Template(null, new CompressedXContent(mapping), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + // use no wildcard, so that backing indices don't match just by name + .indexPatterns(List.of(dataStreamName)) + .template(new Template(null, new CompressedXContent(mapping), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -518,16 +512,11 @@ public void testTimeStampValidationInvalidFieldMapping() throws Exception { }"""; PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo"); createTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("logs-*"), - new Template(null, new 
CompressedXContent(mapping), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-*")) + .template(new Template(null, new CompressedXContent(mapping), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); Exception e = expectThrows( @@ -552,8 +541,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability( dataStreamName, - client().prepareIndex(dataStreamName) - .setSource("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON) + prepareIndex(dataStreamName).setSource("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE), false ); @@ -596,8 +584,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { client().execute(CreateDataStreamAction.INSTANCE, request).actionGet(); verifyResolvability( "logs-barbaz", - client().prepareIndex("logs-barbaz") - .setSource("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON) + prepareIndex("logs-barbaz").setSource("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE), false ); @@ -673,16 +660,14 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio // Now replace it with a higher-priority template and delete the old one PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - Collections.singletonList("metrics-foobar*"), // Match the other data stream with a slightly different pattern - new Template(null, null, null), - null, - 2L, // Higher priority than the other composable template - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + // Match the other data stream with a slightly different pattern + .indexPatterns(Collections.singletonList("metrics-foobar*")) + .template(new Template(null, null, null)) + // Higher priority than the other composable template + .priority(2L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -715,13 +700,11 @@ public void testAliasActionsOnDataStreams() throws Exception { public void testDataSteamAliasWithFilter() throws Exception { putComposableIndexTemplate("id1", List.of("logs-*")); String dataStreamName = "logs-foobar"; - client().prepareIndex(dataStreamName) - .setId("1") + prepareIndex(dataStreamName).setId("1") .setSource("{\"@timestamp\": \"2022-12-12\", \"type\": \"x\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); - client().prepareIndex(dataStreamName) - .setId("2") + prepareIndex(dataStreamName).setId("2") .setSource("{\"@timestamp\": \"2022-12-12\", \"type\": \"y\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); @@ -790,13 +773,11 @@ public void testDataSteamAliasWithFilter() throws Exception { public void testSearchFilteredAndUnfilteredAlias() throws Exception { putComposableIndexTemplate("id1", List.of("logs-*")); String dataStreamName = "logs-foobar"; - client().prepareIndex(dataStreamName) - .setId("1") + prepareIndex(dataStreamName).setId("1") .setSource("{\"@timestamp\": \"2022-12-12\", \"type\": \"x\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); - client().prepareIndex(dataStreamName) - .setId("2") + 
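A large share of this diff is the same mechanical migration: the positional ComposableIndexTemplate constructor, where every unused slot had to be passed as null and a reader had to count arguments to identify the priority or metadata, is replaced by the new builder that names only the properties a call site actually sets. Condensed before/after, with `mapping` as a placeholder variable:

// Before: eight positional arguments, mostly null, order-sensitive.
// new ComposableIndexTemplate(patterns, template, null, 2L, null, null, dataStreamTemplate, null);

// After: the same template, with each property named explicitly.
ComposableIndexTemplate template = ComposableIndexTemplate.builder()
    .indexPatterns(List.of("logs-*"))
    .template(new Template(null, new CompressedXContent(mapping), null))
    .priority(2L) // e.g. to outrank a competing template, as in the test above
    .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
    .build();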
prepareIndex(dataStreamName).setId("2") .setSource("{\"@timestamp\": \"2022-12-12\", \"type\": \"y\"}", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); @@ -1212,15 +1193,11 @@ public void testIndexDocsWithCustomRoutingTargetingDataStreamIsNotAllowed() thro } public void testIndexDocsWithCustomRoutingAllowed() throws Exception { - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs-foobar*"), - new Template(null, null, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-foobar*")) + .template(new Template(null, null, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("id1").indexTemplate(template) @@ -1358,16 +1335,11 @@ public void testMultipleTimestampValuesInDocument() throws Exception { public void testMixedAutoCreate() throws Exception { PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo"); createTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("logs-foo*"), - new Template(null, new CompressedXContent(generateMapping("@timestamp")), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs-foo*")) + .template(new Template(null, new CompressedXContent(generateMapping("@timestamp")), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, createTemplateRequest).actionGet(); @@ -1599,21 +1571,22 @@ public void testSegmentsSortedOnTimestampDesc() throws Exception { indexDocs("metrics-foo", numDocs3); // 3rd segment int totalDocs = numDocs1 + numDocs2 + numDocs3; - SearchSourceBuilder source = new SearchSourceBuilder(); - source.fetchField(new FieldAndFormat(DEFAULT_TIMESTAMP_FIELD, "epoch_millis")); - source.size(totalDocs); - SearchRequest searchRequest = new SearchRequest(new String[] { "metrics-foo" }, source); - SearchResponse searchResponse = client().search(searchRequest).actionGet(); - assertEquals(totalDocs, searchResponse.getHits().getTotalHits().value); - SearchHit[] hits = searchResponse.getHits().getHits(); - assertEquals(totalDocs, hits.length); - - // Test that when we read data, segments come in the reverse order with a segment with the latest date first - long timestamp1 = Long.valueOf(hits[0].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of 1st seg - long timestamp2 = Long.valueOf(hits[0 + numDocs3].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of the 2nd seg - long timestamp3 = Long.valueOf(hits[0 + numDocs3 + numDocs2].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of the 3rd seg - assertTrue(timestamp1 > timestamp2); - assertTrue(timestamp2 > timestamp3); + assertResponse( + prepareSearch("metrics-foo").addFetchField(new FieldAndFormat(DEFAULT_TIMESTAMP_FIELD, "epoch_millis")).setSize(totalDocs), + resp -> { + assertEquals(totalDocs, resp.getHits().getTotalHits().value); + SearchHit[] hits = resp.getHits().getHits(); + assertEquals(totalDocs, hits.length); + + // Test that when we read data, segments come in the reverse order with a segment with the latest date first + long timestamp1 = 
Long.valueOf(hits[0].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of 1st seg + long timestamp2 = Long.valueOf(hits[0 + numDocs3].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of the 2nd seg + long timestamp3 = Long.valueOf(hits[0 + numDocs3 + numDocs2].field(DEFAULT_TIMESTAMP_FIELD).getValue()); // 1st doc of the + // 3rd seg + assertTrue(timestamp1 > timestamp2); + assertTrue(timestamp2 > timestamp3); + } + ); } public void testCreateDataStreamWithSameNameAsIndexAlias() throws Exception { @@ -1815,7 +1788,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { original.isSystem(), original.isAllowCustomRouting(), original.getIndexMode(), - original.getLifecycle() + original.getLifecycle(), + original.isFailureStore(), + original.getFailureIndices() ); brokenDataStreamHolder.set(broken); return ClusterState.builder(currentState) @@ -1869,11 +1844,12 @@ private static void verifyResolvability( if (fail) { String expectedErrorMessage = "no such index [" + dataStream + "]"; if (requestBuilder instanceof MultiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get(); - assertThat(multiSearchResponse.getResponses().length, equalTo(1)); - assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); - assertThat(multiSearchResponse.getResponses()[0].getFailure(), instanceOf(IllegalArgumentException.class)); - assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), equalTo(expectedErrorMessage)); + assertResponse((MultiSearchRequestBuilder) requestBuilder, multiSearchResponse -> { + assertThat(multiSearchResponse.getResponses().length, equalTo(1)); + assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); + assertThat(multiSearchResponse.getResponses()[0].getFailure(), instanceOf(IllegalArgumentException.class)); + assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), equalTo(expectedErrorMessage)); + }); } else if (requestBuilder instanceof ValidateQueryRequestBuilder) { Exception e = expectThrows(IndexNotFoundException.class, requestBuilder::get); assertThat(e.getMessage(), equalTo(expectedErrorMessage)); @@ -1885,8 +1861,10 @@ private static void verifyResolvability( if (requestBuilder instanceof SearchRequestBuilder searchRequestBuilder) { assertHitCount(searchRequestBuilder, expectedCount); } else if (requestBuilder instanceof MultiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get(); - assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(false)); + assertResponse( + (MultiSearchRequestBuilder) requestBuilder, + multiSearchResponse -> assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(false)) + ); } else { requestBuilder.get(); } @@ -1914,12 +1892,10 @@ static void indexDocs(String dataStream, int numDocs) { } static void verifyDocs(String dataStream, long expectedNumHits, List expectedIndices) { - SearchRequest searchRequest = new SearchRequest(dataStream); - searchRequest.source().size((int) expectedNumHits); - SearchResponse searchResponse = client().search(searchRequest).actionGet(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(expectedNumHits)); - - Arrays.stream(searchResponse.getHits().getHits()).forEach(hit -> { assertTrue(expectedIndices.contains(hit.getIndex())); }); + assertResponse(prepareSearch(dataStream).setSize((int) expectedNumHits), resp -> { + assertThat(resp.getHits().getTotalHits().value, 
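The other recurring rewrite visible above swaps the fetch-then-assert pattern (materializing a SearchResponse via client().search(...).actionGet()) for ElasticsearchAssertions.assertResponse(requestBuilder, consumer), which runs the assertions in a lambda and takes care of cleaning up the response afterwards. A hand-rolled sketch of what the helper saves the caller from writing, assuming its cleanup amounts to releasing the ref-counted response:

// Roughly what assertResponse(prepareSearch(...), resp -> { ... }) replaces:
SearchResponse resp = prepareSearch("metrics-foo").setSize(10).get();
try {
    assertEquals(10, resp.getHits().getHits().length);
} finally {
    resp.decRef(); // easy to forget; the lambda form makes the release automatic
}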
equalTo(expectedNumHits)); + Arrays.stream(resp.getHits().getHits()).forEach(hit -> assertTrue(expectedIndices.contains(hit.getIndex()))); + }); } static void verifyDocs(String dataStream, long expectedNumHits, long minGeneration, long maxGeneration) { @@ -1938,19 +1914,17 @@ public void testPartitionedTemplate() throws IOException { /** * partition size with no routing required */ - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - null, - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + null, + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); ComposableIndexTemplate finalTemplate = template; client().execute( PutComposableIndexTemplateAction.INSTANCE, @@ -1959,24 +1933,22 @@ public void testPartitionedTemplate() throws IOException { /** * partition size with routing required */ - template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - new CompressedXContent(""" - { - "_routing": { - "required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) @@ -1985,19 +1957,17 @@ public void testPartitionedTemplate() throws IOException { /** * routing settings with allow custom routing false */ - template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - null, - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) - ); + template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + null, + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); ComposableIndexTemplate finalTemplate1 = template; Exception e = expectThrows( IllegalArgumentException.class, @@ -2015,24 +1985,22 @@ public void testPartitionedTemplate() throws IOException { } public void testRoutingEnabledInMappingDisabledInDataStreamTemplate() throws IOException { - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("logs"), - new Template( - Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), - new CompressedXContent(""" - { - "_routing": { - 
"required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("logs")) + .template( + new Template( + Settings.builder().put("index.number_of_shards", "3").put("index.routing_partition_size", "2").build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); Exception e = expectThrows( IllegalArgumentException.class, () -> client().execute( @@ -2048,37 +2016,34 @@ public void testSearchWithRouting() throws IOException, ExecutionException, Inte /** * partition size with routing required */ - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of("my-logs"), - new Template( - Settings.builder() - .put("index.number_of_shards", "10") - .put("index.number_of_routing_shards", "10") - .put("index.routing_partition_size", "4") - .build(), - new CompressedXContent(""" - { - "_routing": { - "required": true - } - }"""), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, true) - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of("my-logs")) + .template( + new Template( + Settings.builder() + .put("index.number_of_shards", "10") + .put("index.number_of_routing_shards", "10") + .put("index.routing_partition_size", "4") + .build(), + new CompressedXContent(""" + { + "_routing": { + "required": true + } + }"""), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) ).actionGet(); CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("my-logs"); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - SearchRequest searchRequest = new SearchRequest("my-logs").routing("123"); - SearchResponse searchResponse = client().search(searchRequest).actionGet(); - assertEquals(searchResponse.getTotalShards(), 4); + + assertResponse(prepareSearch("my-logs").setRouting("123"), resp -> { assertEquals(resp.getTotalShards(), 4); }); } public void testWriteIndexWriteLoadAndAvgShardSizeIsStoredAfterRollover() throws Exception { @@ -2331,16 +2296,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), aliases, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), aliases, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index ceac7423b0b72..c3e59be54cc7f 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -842,8 +842,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception { .setOpType(DocWriteRequest.OpType.CREATE) .setId(Integer.toString(i)) .setSource(Collections.singletonMap("@timestamp", "2020-12-12")) - .execute() - .actionGet(); + .get(); } refresh(); assertDocCount(dataStream, 100L); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java index aeb7516c35816..672d2d21d73a5 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java @@ -33,23 +33,17 @@ public void testDefaultDataStreamAllocateToHot() { startHotOnlyNode(); ensureGreen(); - ComposableIndexTemplate template = new ComposableIndexTemplate( - Collections.singletonList(index), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(index)) + + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("template").indexTemplate(template) ).actionGet(); - var dsIndexName = client().prepareIndex(index) - .setCreate(true) + var dsIndexName = prepareIndex(index).setCreate(true) .setId("1") .setSource("@timestamp", "2020-09-09") .setWaitForActiveShards(0) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java index 922b58e3920e1..734e2d7273d19 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java @@ -317,15 +317,11 @@ public Collection getSystemDataStreamDescriptors() { ".test-data-stream", "system data stream test", Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".test-data-stream"), - new Template(Settings.EMPTY, mappings, null), - null, - null, - null, - null, - new DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".test-data-stream")) + .template(new Template(Settings.EMPTY, mappings, null)) + .dataStreamTemplate(new DataStreamTemplate()) + .build(), Map.of(), List.of("product"), ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS diff --git 
a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java index 0f60cbba0a4ff..698656dfa7406 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java @@ -64,8 +64,7 @@ public void testSystemDataStreamInGlobalState() throws Exception { } // Index a doc so that a concrete backing index will be created - DocWriteResponse indexRepsonse = client().prepareIndex(SYSTEM_DATA_STREAM_NAME) - .setId("42") + DocWriteResponse indexRepsonse = prepareIndex(SYSTEM_DATA_STREAM_NAME).setId("42") .setSource("{ \"@timestamp\": \"2099-03-08T11:06:07.000Z\", \"name\": \"my-name\" }", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) .get(); @@ -162,20 +161,16 @@ public void testSystemDataStreamInFeatureState() throws Exception { } // Index a doc so that a concrete backing index will be created - DocWriteResponse indexToDataStreamResponse = client().prepareIndex(SYSTEM_DATA_STREAM_NAME) - .setId("42") + DocWriteResponse indexToDataStreamResponse = prepareIndex(SYSTEM_DATA_STREAM_NAME).setId("42") .setSource("{ \"@timestamp\": \"2099-03-08T11:06:07.000Z\", \"name\": \"my-name\" }", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) - .execute() - .actionGet(); + .get(); assertThat(indexToDataStreamResponse.status().getStatus(), oneOf(200, 201)); // Index a doc so that a concrete backing index will be created - DocWriteResponse indexResponse = client().prepareIndex("my-index") - .setId("42") + DocWriteResponse indexResponse = prepareIndex("my-index").setId("42") .setSource("{ \"name\": \"my-name\" }", XContentType.JSON) .setOpType(DocWriteRequest.OpType.CREATE) - .execute() .get(); assertThat(indexResponse.status().getStatus(), oneOf(200, 201)); @@ -238,15 +233,10 @@ public Collection getSystemDataStreamDescriptors() { SYSTEM_DATA_STREAM_NAME, "a system data stream for testing", SystemDataStreamDescriptor.Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".system-data-stream"), - null, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".system-data-stream")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build(), Map.of(), Collections.singletonList("test"), new ExecutorNames(ThreadPool.Names.SYSTEM_CRITICAL_READ, ThreadPool.Names.SYSTEM_READ, ThreadPool.Names.SYSTEM_WRITE) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 5dbf52f33d7da..ab42d831c6545 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -115,16 +115,11 @@ public void testTimeRanges() throws Exception { if (randomBoolean()) { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - 
null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } else { @@ -134,16 +129,12 @@ public void testTimeRanges() throws Exception { var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), null, null), - List.of("1"), - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), null, null)) + .componentTemplates(List.of("1")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet(); } @@ -249,20 +240,17 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( IllegalArgumentException.class, @@ -280,20 +268,17 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce { var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.mode", "time_series").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( InvalidIndexTemplateException.class, @@ -317,20 +302,17 @@ public void testInvalidTsdbTemplatesNoKeywordFieldType() throws Exception { }"""; var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + 
Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); Exception e = expectThrows( IllegalArgumentException.class, @@ -360,20 +342,17 @@ public void testInvalidTsdbTemplatesMissingSettings() throws Exception { }"""; var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template( - Settings.builder().put("index.routing_path", "metricset").build(), - new CompressedXContent(mappingTemplate), - null - ), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template( + new Template( + Settings.builder().put("index.routing_path", "metricset").build(), + new CompressedXContent(mappingTemplate), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); var e = expectThrows( IllegalArgumentException.class, @@ -389,16 +368,11 @@ public void testSkippingShards() throws Exception { var templateSettings = Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(); var request = new PutComposableIndexTemplateAction.Request("id1"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("pattern-1"), - new Template(templateSettings, mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("pattern-1")) + .template(new Template(templateSettings, mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); var indexRequest = new IndexRequest("pattern-1").opType(DocWriteRequest.OpType.CREATE).setRefreshPolicy("true"); @@ -408,16 +382,11 @@ public void testSkippingShards() throws Exception { { var request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("pattern-2"), - new Template(null, mapping, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("pattern-2")) + .template(new Template(null, mapping, null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); var indexRequest = new IndexRequest("pattern-2").opType(DocWriteRequest.OpType.CREATE).setRefreshPolicy("true"); @@ -457,26 +426,23 @@ public void testTrimId() throws Exception { String dataStreamName = "k8s"; var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( - new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - new Template( - Settings.builder() - .put("index.mode", "time_series") - .put("index.number_of_replicas", 0) - // Reduce sync interval to speedup this integraton test, - // otherwise by default it will take 30 seconds before minimum retained seqno is updated: - .put("index.soft_deletes.retention_lease.sync_interval", "100ms") - .build(), - new CompressedXContent(MAPPING_TEMPLATE), - null - 
), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template( + new Template( + Settings.builder() + .put("index.mode", "time_series") + .put("index.number_of_replicas", 0) + // Reduce sync interval to speed up this integration test, + // otherwise by default it will take 30 seconds before minimum retained seqno is updated: + .put("index.soft_deletes.retention_lease.sync_interval", "100ms") + .build(), + new CompressedXContent(MAPPING_TEMPLATE), + null + ) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java index d2baec3150392..8e590d3f28346 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java @@ -199,20 +199,18 @@ public Collection getSystemDataStreamDescriptors() { ".test-data-stream", "system data stream test", Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(".test-data-stream"), - new Template( - Settings.EMPTY, - mappings, - null, - DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() - ), - null, - null, - null, - null, - new DataStreamTemplate() - ), + ComposableIndexTemplate.builder() + .indexPatterns(List.of(".test-data-stream")) + .template( + new Template( + Settings.EMPTY, + mappings, + null, + DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() + ) + ) + .dataStreamTemplate(new DataStreamTemplate()) + .build(), Map.of(), List.of("product"), ExecutorNames.DEFAULT_SYSTEM_DATA_STREAM_THREAD_POOLS diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index 0d3588ba20b9a..7ac86c8aee614 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -182,16 +182,10 @@ public void testOriginationDate() throws Exception { }"""; PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("index_*"), - new Template(null, CompressedXContent.fromJSON(mapping), null, null), - null, - null, - null, - null, - null, - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("index_*")) + .template(new Template(null, CompressedXContent.fromJSON(mapping), null, null)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); @@ -628,35 +622,6 @@ public void testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception }); } - private static List getBackingIndices(String dataStreamName) { -
GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); - GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) - .actionGet(); - assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); - assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); - return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().stream().map(Index::getName).toList(); - } - - static void indexDocs(String dataStream, int numDocs) { - BulkRequest bulkRequest = new BulkRequest(); - for (int i = 0; i < numDocs; i++) { - String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); - bulkRequest.add( - new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) - .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) - ); - } - BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); - assertThat(bulkResponse.getItems().length, equalTo(numDocs)); - String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream; - for (BulkItemResponse itemResponse : bulkResponse) { - assertThat(itemResponse.getFailureMessage(), nullValue()); - assertThat(itemResponse.status(), equalTo(RestStatus.CREATED)); - assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix)); - } - indicesAdmin().refresh(new RefreshRequest(dataStream)).actionGet(); - } - public void testReenableDataStreamLifecycle() throws Exception { // start with a lifecycle that's not enabled DataStreamLifecycle lifecycle = new DataStreamLifecycle(null, null, false); @@ -706,6 +671,35 @@ public void testReenableDataStreamLifecycle() throws Exception { }); } + private static List getBackingIndices(String dataStreamName) { + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) + .actionGet(); + assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); + assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); + return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().stream().map(Index::getName).toList(); + } + + static void indexDocs(String dataStream, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) + .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) + ); + } + BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); + assertThat(bulkResponse.getItems().length, equalTo(numDocs)); + String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream; + for (BulkItemResponse itemResponse : bulkResponse) { + assertThat(itemResponse.getFailureMessage(), nullValue()); + assertThat(itemResponse.status(), equalTo(RestStatus.CREATED)); + assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix)); + } + indicesAdmin().refresh(new RefreshRequest(dataStream)).actionGet(); + } + static void putComposableIndexTemplate( String id, @Nullable String mappings, 
@@ -716,16 +710,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index c9968a545cb7d..57febae28bb4d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -351,16 +351,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ? 
null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java index b150c71c86122..b8e79d2fec7cd 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.List; @@ -25,6 +26,14 @@ public class LogsDataStreamIT extends DisabledSecurityDataStreamTestCase { + private RestClient client; + + @Before + public void setup() throws Exception { + client = client(); + waitForLogs(client); + } + @After public void cleanUp() throws IOException { adminClient().performRequest(new Request("DELETE", "_data_stream/*")); @@ -32,9 +41,6 @@ public void cleanUp() throws IOException { @SuppressWarnings("unchecked") public void testDefaultLogsSettingAndMapping() throws Exception { - RestClient client = client(); - waitForLogs(client); - String dataStreamName = "logs-generic-default"; createDataStream(client, dataStreamName); String backingIndex = getWriteBackingIndex(client, dataStreamName); @@ -104,9 +110,6 @@ public void testDefaultLogsSettingAndMapping() throws Exception { @SuppressWarnings("unchecked") public void testCustomMapping() throws Exception { - RestClient client = client(); - waitForLogs(client); - { Request request = new Request("POST", "/_component_template/logs@custom"); request.setJsonEntity(""" @@ -182,9 +185,6 @@ public void testCustomMapping() throws Exception { @SuppressWarnings("unchecked") public void testLogsDefaultPipeline() throws Exception { - RestClient client = client(); - waitForLogs(client); - { Request request = new Request("POST", "/_component_template/logs@custom"); request.setJsonEntity(""" @@ -284,9 +284,6 @@ public void testLogsDefaultPipeline() throws Exception { @SuppressWarnings("unchecked") public void testLogsMessagePipeline() throws Exception { - RestClient client = client(); - waitForLogs(client); - { Request request = new Request("PUT", "/_ingest/pipeline/logs@custom"); request.setJsonEntity(""" @@ -412,8 +409,6 @@ public void testLogsMessagePipeline() throws Exception { @SuppressWarnings("unchecked") public void testNoSubobjects() throws Exception { - RestClient client = client(); - waitForLogs(client); { Request request = new Request("POST", "/_component_template/logs-test-subobjects-mappings"); request.setJsonEntity(""" @@ -633,6 +628,94 @@ public void testNoSubobjects() throws Exception { } + public void testAllFieldsAreSearchableByDefault() throws Exception { + final String dataStreamName = "logs-generic-default"; + createDataStream(client, dataStreamName); + + // index a doc with "message" field and an additional one that will be mapped to a "match_only_text" type + indexDoc(client, dataStreamName, """ + { + "@timestamp": "2023-04-18", + "message": "Hello world", + "another.message": "Hi world" + } + """); + + // verify that both fields are searchable when not querying specific fields + List results = 
searchDocs(client, dataStreamName, """ + { + "query": { + "simple_query_string": { + "query": "Hello" + } + } + } + """); + assertEquals(1, results.size()); + + results = searchDocs(client, dataStreamName, """ + { + "query": { + "simple_query_string": { + "query": "Hi" + } + } + } + """); + assertEquals(1, results.size()); + } + + public void testDefaultFieldCustomization() throws Exception { + Request request = new Request("POST", "/_component_template/logs@custom"); + request.setJsonEntity(""" + { + "template": { + "settings": { + "index": { + "query": { + "default_field": ["message"] + } + } + } + } + } + """); + assertOK(client.performRequest(request)); + + final String dataStreamName = "logs-generic-default"; + createDataStream(client, dataStreamName); + + indexDoc(client, dataStreamName, """ + { + "@timestamp": "2023-04-18", + "message": "Hello world", + "another.message": "Hi world" + } + """); + + List results = searchDocs(client, dataStreamName, """ + { + "query": { + "simple_query_string": { + "query": "Hello" + } + } + } + """); + assertEquals(1, results.size()); + + results = searchDocs(client, dataStreamName, """ + { + "query": { + "simple_query_string": { + "query": "Hi" + } + } + } + """); + assertEquals(0, results.size()); + } + static void waitForLogs(RestClient client) throws Exception { assertBusy(() -> { try { diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java new file mode 100644 index 0000000000000..cce9132d99d19 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleStatsIT.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams.lifecycle; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; + +public class DataStreamLifecycleStatsIT extends DisabledSecurityDataStreamTestCase { + + @Before + public void updateClusterSettings() throws IOException { + updateClusterSettings( + Settings.builder() + .put("data_streams.lifecycle.poll_interval", "1s") + .put("cluster.lifecycle.default.rollover", "min_docs=1,max_docs=1") + .build() + ); + } + + @After + public void cleanUp() throws IOException { + adminClient().performRequest(new Request("DELETE", "_data_stream/*?expand_wildcards=hidden")); + } + + @SuppressWarnings("unchecked") + public void testStats() throws Exception { + // Check empty stats and wait until we have 2 executions + assertBusy(() -> { + Request request = new Request("GET", "/_lifecycle/stats"); + Map<String, Object> response = entityAsMap(client().performRequest(request)); + assertThat(response.get("data_stream_count"), is(0)); + assertThat(response.get("data_streams"), is(List.of())); + assertThat(response.containsKey("last_run_duration_in_millis"), is(true)); + assertThat(response.containsKey("time_between_starts_in_millis"), is(true)); + }); + + // Create a template + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["my-data-stream-*"], + "data_stream": {}, + "template": { + "lifecycle": {} + } + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + // Create two data streams with one doc each + Request createDocRequest = new Request("POST", "/my-data-stream-1/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); + assertOK(client().performRequest(createDocRequest)); + createDocRequest = new Request("POST", "/my-data-stream-2/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); + assertOK(client().performRequest(createDocRequest)); + + Request request = new Request("GET", "/_lifecycle/stats"); + Map<String, Object> response = entityAsMap(client().performRequest(request)); + assertThat(response.get("data_stream_count"), is(2)); + List<Map<String, Object>> dataStreams = (List<Map<String, Object>>) response.get("data_streams"); + assertThat(dataStreams.get(0).get("name"), is("my-data-stream-1")); + assertThat((Integer) dataStreams.get(0).get("backing_indices_in_total"), greaterThanOrEqualTo(1)); + assertThat((Integer) dataStreams.get(0).get("backing_indices_in_error"), is(0)); + assertThat(dataStreams.get(1).get("name"), is("my-data-stream-2")); + assertThat((Integer) dataStreams.get(1).get("backing_indices_in_total"), greaterThanOrEqualTo(1)); + assertThat((Integer) dataStreams.get(1).get("backing_indices_in_error"), is(0)); + assertThat(response.containsKey("last_run_duration_in_millis"), is(true)); + assertThat(response.containsKey("time_between_starts_in_millis"), is(true)); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 2cf44dc0e3218..dd8e13cf18408 ---
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -40,11 +40,14 @@ import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.TransportPutDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.rest.RestDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.rest.RestDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.rest.RestExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.rest.RestGetDataStreamLifecycleAction; @@ -189,6 +192,7 @@ public Collection createComponents(PluginServices services) { actions.add(new ActionHandler<>(GetDataStreamLifecycleAction.INSTANCE, TransportGetDataStreamLifecycleAction.class)); actions.add(new ActionHandler<>(DeleteDataStreamLifecycleAction.INSTANCE, TransportDeleteDataStreamLifecycleAction.class)); actions.add(new ActionHandler<>(ExplainDataStreamLifecycleAction.INSTANCE, TransportExplainDataStreamLifecycleAction.class)); + actions.add(new ActionHandler<>(GetDataStreamLifecycleStatsAction.INSTANCE, TransportGetDataStreamLifecycleStatsAction.class)); return actions; } @@ -218,6 +222,7 @@ public List getRestHandlers( handlers.add(new RestGetDataStreamLifecycleAction()); handlers.add(new RestDeleteDataStreamLifecycleAction()); handlers.add(new RestExplainDataStreamLifecycleAction()); + handlers.add(new RestDataStreamLifecycleStatsAction()); return handlers; } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java index de81ca9bef18c..e44ee5107711f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java @@ -136,19 +136,9 @@ static GetDataStreamAction.Response innerOperation( Map backingIndicesSettingsValues = new HashMap<>(); Metadata metadata = state.getMetadata(); - for (Index index : dataStream.getIndices()) { - IndexMetadata indexMetadata = metadata.index(index); - Boolean preferIlm = PREFER_ILM_SETTING.get(indexMetadata.getSettings()); - assert preferIlm != null : "must use the default prefer ilm setting value, if nothing else"; - ManagedBy managedBy; - if (metadata.isIndexManagedByILM(indexMetadata)) { - managedBy = ManagedBy.ILM; - } else if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { - managedBy = ManagedBy.LIFECYCLE; - } else { - managedBy = 
ManagedBy.UNMANAGED; - } - backingIndicesSettingsValues.put(index, new IndexProperties(preferIlm, indexMetadata.getLifecyclePolicyName(), managedBy)); + collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getIndices()); + if (DataStream.isFailureStoreEnabled() && dataStream.getFailureIndices().isEmpty() == false) { + collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getFailureIndices()); } GetDataStreamAction.Response.TimeSeries timeSeries = null; @@ -213,6 +203,28 @@ static GetDataStreamAction.Response innerOperation( ); } + private static void collectIndexSettingsValues( + DataStream dataStream, + Map backingIndicesSettingsValues, + Metadata metadata, + List backingIndices + ) { + for (Index index : backingIndices) { + IndexMetadata indexMetadata = metadata.index(index); + Boolean preferIlm = PREFER_ILM_SETTING.get(indexMetadata.getSettings()); + assert preferIlm != null : "must use the default prefer ilm setting value, if nothing else"; + ManagedBy managedBy; + if (metadata.isIndexManagedByILM(indexMetadata)) { + managedBy = ManagedBy.ILM; + } else if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { + managedBy = ManagedBy.LIFECYCLE; + } else { + managedBy = ManagedBy.UNMANAGED; + } + backingIndicesSettingsValues.put(index, new IndexProperties(preferIlm, indexMetadata.getLifecyclePolicyName(), managedBy)); + } + } + static List getDataStreams( ClusterState clusterState, IndexNameExpressionResolver iner, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java index 47589fd7276f4..01ccbdbe3ffec 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; -import java.util.List; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.LongSupplier; @@ -87,7 +87,7 @@ public ErrorEntry getError(String indexName) { /** * Return an immutable view (a snapshot) of the tracked indices at the moment this method is called. 
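On the error-store change just below: getAllIndices() now hands out an immutable point-in-time Set instead of a List, which both documents that index names are unique and gives callers O(1) membership checks while lifecycle threads keep mutating the backing ConcurrentHashMap. A sketch of the snapshot semantics, assuming the constructor takes the LongSupplier visible in the imports and that recordError(indexName, exception) is the mutator the later javadoc refers to:

// Hedged sketch only; constructor and recordError signature are assumptions.
DataStreamLifecycleErrorStore store = new DataStreamLifecycleErrorStore(System::currentTimeMillis);
Set<String> snapshot = store.getAllIndices();       // immutable copy, not a live view
store.recordError(".ds-my-stream-000001", new RuntimeException("boom"));
assert snapshot.isEmpty();                          // the earlier snapshot is unaffected
assert store.getAllIndices().contains(".ds-my-stream-000001");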
*/ - public List<String> getAllIndices() { - return List.copyOf(indexNameToError.keySet()); + public Set<String> getAllIndices() { + return Set.copyOf(indexNameToError.keySet()); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 03d1340c14dbb..9f9a90704167d 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -175,6 +175,13 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeable */ private volatile int signallingErrorRetryInterval; + + /** + * The following stats track how the data stream lifecycle runs are performing time-wise + */ + private volatile Long lastRunStartedAt = null; + private volatile Long lastRunDuration = null; + private volatile Long timeBetweenStarts = null; + private static final SimpleBatchedExecutor FORCE_MERGE_STATE_UPDATE_TASK_EXECUTOR = new SimpleBatchedExecutor<>() { @Override @@ -299,6 +306,11 @@ public void triggered(SchedulerEngine.Event event) { */ // default visibility for testing purposes void run(ClusterState state) { + long startTime = nowSupplier.getAsLong(); + if (lastRunStartedAt != null) { + timeBetweenStarts = startTime - lastRunStartedAt; + } + lastRunStartedAt = startTime; int affectedIndices = 0; int affectedDataStreams = 0; for (DataStream dataStream : state.metadata().dataStreams().values()) { @@ -396,8 +408,10 @@ void run(ClusterState state) { affectedIndices += indicesToExcludeForRemainingRun.size(); affectedDataStreams++; } + lastRunDuration = nowSupplier.getAsLong() - lastRunStartedAt; logger.trace( - "Data stream lifecycle service performed operations on [{}] indices, part of [{}] data streams", + "Data stream lifecycle service ran for {} and performed operations on [{}] indices, part of [{}] data streams", + TimeValue.timeValueMillis(lastRunDuration).toHumanReadableString(2), affectedIndices, affectedDataStreams ); @@ -1193,6 +1207,22 @@ static TimeValue getRetentionConfiguration(DataStream dataStream) { return dataStream.getLifecycle().getEffectiveDataRetention(); } + + /** + * @return the duration of the last run in millis, or null if the service hasn't completed a run yet. + */ + @Nullable + public Long getLastRunDuration() { + return lastRunDuration; + } + + /** + * @return the time passed between the start times of the last two consecutive runs, or null if the service hasn't started twice yet. + */ + @Nullable + public Long getTimeBetweenStarts() { + return timeBetweenStarts; + } + + /** * Action listener that records the encountered failure using the provided recordError callback for the * provided target index. If the listener is notified of success it will clear the recorded entry for the provided diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..c3444a67b847c --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ToXContent; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; + +/** + * This action retrieves the data stream lifecycle stats from the master node. + */ +public class GetDataStreamLifecycleStatsAction extends ActionType<GetDataStreamLifecycleStatsAction.Response> { + + public static final GetDataStreamLifecycleStatsAction INSTANCE = new GetDataStreamLifecycleStatsAction(); + public static final String NAME = "cluster:monitor/data_stream/lifecycle/stats"; + + private GetDataStreamLifecycleStatsAction() { + super(NAME, Response::new); + } + + public static class Request extends MasterNodeReadRequest<Request> { + + public Request(StreamInput in) throws IOException { + super(in); + } + + public Request() {} + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class Response extends ActionResponse implements ChunkedToXContentObject { + + private final Long runDuration; + private final Long timeBetweenStarts; + private final List<DataStreamStats> dataStreamStats; + + public Response(@Nullable Long runDuration, @Nullable Long timeBetweenStarts, List<DataStreamStats> dataStreamStats) { + this.runDuration = runDuration; + this.timeBetweenStarts = timeBetweenStarts; + this.dataStreamStats = dataStreamStats; + } + + public Response(StreamInput in) throws IOException { + super(in); + this.runDuration = in.readOptionalVLong(); + this.timeBetweenStarts = in.readOptionalVLong(); + this.dataStreamStats = in.readCollectionAsImmutableList(DataStreamStats::read); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVLong(runDuration); + out.writeOptionalVLong(timeBetweenStarts); + out.writeCollection(dataStreamStats, (o, v) -> v.writeTo(o)); + } + + public Long getRunDuration() { + return runDuration; + } + + public Long getTimeBetweenStarts() { + return timeBetweenStarts; + } + + public List<DataStreamStats> getDataStreamStats() { + return dataStreamStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response other = (Response) o; + return Objects.equals(runDuration, other.runDuration) + && Objects.equals(timeBetweenStarts, other.timeBetweenStarts) + && Objects.equals(dataStreamStats, other.dataStreamStats); + } + + @Override + public int hashCode() { + return Objects.hash(runDuration, timeBetweenStarts, dataStreamStats); + } + + @Override + public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerParams) { + return Iterators.concat(Iterators.single((builder, params) -> {
builder.startObject(); + if (runDuration != null) { + builder.field("last_run_duration_in_millis", runDuration); + if (builder.humanReadable()) { + builder.field("last_run_duration", TimeValue.timeValueMillis(runDuration).toHumanReadableString(2)); + } + } + if (timeBetweenStarts != null) { + builder.field("time_between_starts_in_millis", timeBetweenStarts); + if (builder.humanReadable()) { + builder.field("time_between_starts", TimeValue.timeValueMillis(timeBetweenStarts).toHumanReadableString(2)); + } + } + builder.field("data_stream_count", dataStreamStats.size()); + builder.startArray("data_streams"); + return builder; + }), Iterators.map(dataStreamStats.iterator(), stat -> (builder, params) -> { + builder.startObject(); + builder.field("name", stat.dataStreamName); + builder.field("backing_indices_in_total", stat.backingIndicesInTotal); + builder.field("backing_indices_in_error", stat.backingIndicesInError); + builder.endObject(); + return builder; + }), Iterators.single((builder, params) -> { + builder.endArray(); + builder.endObject(); + return builder; + })); + } + + public record DataStreamStats(String dataStreamName, int backingIndicesInTotal, int backingIndicesInError) implements Writeable { + + public static DataStreamStats read(StreamInput in) throws IOException { + return new DataStreamStats(in.readString(), in.readVInt(), in.readVInt()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(dataStreamName); + out.writeVInt(backingIndicesInTotal); + out.writeVInt(backingIndicesInError); + } + } + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..03bc1d129eaba --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; +import org.elasticsearch.index.Index; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Set; + +/** + * Exposes stats about the latest lifecycle run and the error store. + */ +public class TransportGetDataStreamLifecycleStatsAction extends TransportMasterNodeReadAction< + GetDataStreamLifecycleStatsAction.Request, + GetDataStreamLifecycleStatsAction.Response> { + + private final DataStreamLifecycleService lifecycleService; + + @Inject + public TransportGetDataStreamLifecycleStatsAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + DataStreamLifecycleService lifecycleService + ) { + super( + GetDataStreamLifecycleStatsAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + GetDataStreamLifecycleStatsAction.Request::new, + indexNameExpressionResolver, + GetDataStreamLifecycleStatsAction.Response::new, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + this.lifecycleService = lifecycleService; + } + + @Override + protected void masterOperation( + Task task, + GetDataStreamLifecycleStatsAction.Request request, + ClusterState state, + ActionListener<GetDataStreamLifecycleStatsAction.Response> listener + ) throws Exception { + listener.onResponse(collectStats(state)); + } + + // Visible for testing + GetDataStreamLifecycleStatsAction.Response collectStats(ClusterState state) { + Metadata metadata = state.metadata(); + Set<String> indicesInErrorStore = lifecycleService.getErrorStore().getAllIndices(); + List<GetDataStreamLifecycleStatsAction.Response.DataStreamStats> dataStreamStats = new ArrayList<>(); + for (DataStream dataStream : state.metadata().dataStreams().values()) { + if (dataStream.getLifecycle() != null && dataStream.getLifecycle().isEnabled()) { + int total = 0; + int inError = 0; + for (Index index : dataStream.getIndices()) { + if (dataStream.isIndexManagedByDataStreamLifecycle(index, metadata::index)) { + total++; + if (indicesInErrorStore.contains(index.getName())) { + inError++; + } + } + } + dataStreamStats.add(new GetDataStreamLifecycleStatsAction.Response.DataStreamStats(dataStream.getName(), total, inError)); + } + } + return new GetDataStreamLifecycleStatsAction.Response( + lifecycleService.getLastRunDuration(), + lifecycleService.getTimeBetweenStarts(), + dataStreamStats.isEmpty() + ?
dataStreamStats + : dataStreamStats.stream() + .sorted(Comparator.comparing(GetDataStreamLifecycleStatsAction.Response.DataStreamStats::dataStreamName)) + .toList() + ); + } + + @Override + protected ClusterBlockException checkBlock(GetDataStreamLifecycleStatsAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java new file mode 100644 index 0000000000000..2daff2a05940c --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.rest; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleStatsAction; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +@ServerlessScope(Scope.PUBLIC) +public class RestDataStreamLifecycleStatsAction extends BaseRestHandler { + + @Override + public String getName() { + return "data_stream_lifecycle_stats_action"; + } + + @Override + public List<Route> routes() { + return List.of(new Route(GET, "/_lifecycle/stats")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String masterNodeTimeout = restRequest.param("master_timeout"); + GetDataStreamLifecycleStatsAction.Request request = new GetDataStreamLifecycleStatsAction.Request(); + if (masterNodeTimeout != null) { + request.masterNodeTimeout(masterNodeTimeout); + } + return channel -> client.execute(GetDataStreamLifecycleStatsAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index 23a86b657b82d..e622d16b5d4c9 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -315,7 +315,9 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMi ds.isSystem(), ds.isAllowCustomRouting(), IndexMode.TIME_SERIES, - ds.getLifecycle() + ds.getLifecycle(), + ds.isFailureStore(), + ds.getFailureIndices() ) ); Metadata metadata = mb.build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index da0caff9e591d..928512f659039 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -226,16 +226,11 @@ private String createDataStream(boolean hidden) throws Exception { Template idxTemplate = new Template(null, new CompressedXContent(""" {"properties":{"@timestamp":{"type":"date"},"data":{"type":"keyword"}}} """), null); - ComposableIndexTemplate template = new ComposableIndexTemplate( - List.of(dataStreamName + "*"), - idxTemplate, - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(hidden, false), - null - ); + ComposableIndexTemplate template = ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName + "*")) + .template(idxTemplate) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(hidden, false)) + .build(); assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index 4f36feba17c89..e7339cc3f334a 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -59,16 +59,13 @@ public void testRequireRoutingPath() throws Exception { // Missing routing path should fail validation var componentTemplate = new ComponentTemplate(new Template(null, new CompressedXContent("{}"), null), null, null); var state = service.addComponentTemplate(ClusterState.EMPTY_STATE, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var e = expectThrows(InvalidIndexTemplateException.class, () -> service.addIndexTemplateV2(state, false, "1", indexTemplate)); assertThat(e.getMessage(), containsString("[index.mode=time_series] requires a non-empty [index.routing_path]")); } @@ -81,16 +78,13 @@ public void testRequireRoutingPath() throws Exception { null ); state = service.addComponentTemplate(state, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new 
ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); state = service.addIndexTemplateV2(state, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } @@ -103,46 +97,39 @@ public void testRequireRoutingPath() throws Exception { null ); state = service.addComponentTemplate(state, true, "1", componentTemplate); - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(null, null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(null, null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); state = service.addIndexTemplateV2(state, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } { // Routing path defined in index template - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template(new Template(builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null)) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var state = service.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } { // Routing fetched from mapping in index template - var indexTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template(builder().put("index.mode", "time_series").build(), new CompressedXContent(generateTsdbMapping()), null), - List.of("1"), - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); + var indexTemplate = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("logs-*-*")) + .template( + new Template(builder().put("index.mode", "time_series").build(), new CompressedXContent(generateTsdbMapping()), null) + ) + .componentTemplates(List.of("1")) + .priority(100L) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); var state = service.addIndexTemplateV2(ClusterState.EMPTY_STATE, false, "1", indexTemplate); assertThat(state.getMetadata().templatesV2().get("1"), equalTo(indexTemplate)); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 803f5c8661f17..1a9287c1d5ee8 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -96,16 +96,11 @@ private void createTemplate(boolean tsdb) throws IOException { var templateSettings = 
Settings.builder().put("index.mode", tsdb ? "time_series" : "standard"); var request = new PutComposableIndexTemplateAction.Request("id"); request.indexTemplate( - new ComposableIndexTemplate( - List.of("k8s*"), - new Template(templateSettings.build(), new CompressedXContent(mappingTemplate), null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(List.of("k8s*")) + .template(new Template(templateSettings.build(), new CompressedXContent(mappingTemplate), null)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() ); client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java index 989bebc68061d..c383991dba19c 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java @@ -151,7 +151,9 @@ public void testUpdateTimeSeriesTemporalRange_NoUpdateBecauseReplicated() { d.isSystem(), d.isAllowCustomRouting(), d.getIndexMode(), - d.getLifecycle() + d.getLifecycle(), + d.isFailureStore(), + d.getFailureIndices() ) ) .build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index 12e1604d10c1f..5ebea62fc596a 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -33,6 +33,7 @@ import java.util.Map; import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; +import static org.elasticsearch.cluster.metadata.DataStream.getDefaultFailureStoreName; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -63,14 +64,16 @@ protected Response mutateInstance(Response instance) { @SuppressWarnings("unchecked") public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Exception { - // we'll test a data stream with 3 backing indices - two managed by ILM (having the ILM policy configured for them) - // and one without any ILM policy configured + // we'll test a data stream with 3 backing indices and a failure store - two backing indices managed by ILM (having the ILM policy + // configured for them) and the remainder without any ILM policy configured String dataStreamName = "logs"; Index firstGenerationIndex = new Index(getDefaultBackingIndexName(dataStreamName, 1), UUIDs.base64UUID()); Index secondGenerationIndex = new Index(getDefaultBackingIndexName(dataStreamName, 2), UUIDs.base64UUID()); Index writeIndex = new Index(getDefaultBackingIndexName(dataStreamName, 3), UUIDs.base64UUID()); + Index failureStoreIndex = new Index(getDefaultFailureStoreName(dataStreamName, 1, System.currentTimeMillis()), UUIDs.base64UUID()); List<Index> indices = List.of(firstGenerationIndex, secondGenerationIndex, writeIndex); + List<Index> failureStores = List.of(failureStoreIndex); { // data stream has an enabled lifecycle DataStream logs = new DataStream( @@ -83,7
+86,9 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti false, true, IndexMode.STANDARD, - new DataStreamLifecycle() + new DataStreamLifecycle(), + true, + failureStores ); String ilmPolicyName = "rollover-30days"; @@ -93,6 +98,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti secondGenerationIndex, new Response.IndexProperties(false, ilmPolicyName, ManagedBy.LIFECYCLE), writeIndex, + new Response.IndexProperties(false, null, ManagedBy.LIFECYCLE), + failureStoreIndex, new Response.IndexProperties(false, null, ManagedBy.LIFECYCLE) ); @@ -156,6 +163,18 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti writeIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.LIFECYCLE.displayValue) ); + + List<Object> failureStoresRepresentation = (List<Object>) dataStreamMap.get( + DataStream.FAILURE_INDICES_FIELD.getPreferredName() + ); + Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0); + assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName())); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false)); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue())); + assertThat( + failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), + is(ManagedBy.LIFECYCLE.displayValue) + ); } } @@ -171,7 +190,9 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti false, true, IndexMode.STANDARD, - new DataStreamLifecycle(null, null, false) + new DataStreamLifecycle(null, null, false), + true, + failureStores ); String ilmPolicyName = "rollover-30days"; @@ -181,6 +202,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti secondGenerationIndex, new Response.IndexProperties(true, ilmPolicyName, ManagedBy.ILM), writeIndex, + new Response.IndexProperties(false, null, ManagedBy.UNMANAGED), + failureStoreIndex, new Response.IndexProperties(false, null, ManagedBy.UNMANAGED) ); @@ -233,6 +256,18 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti writeIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.UNMANAGED.displayValue) ); + + List<Object> failureStoresRepresentation = (List<Object>) dataStreamMap.get( + DataStream.FAILURE_INDICES_FIELD.getPreferredName() + ); + Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0); + assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName())); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false)); + assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue())); + assertThat( + failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), + is(ManagedBy.UNMANAGED.displayValue) + ); } } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java index c1255cc9e3a72..9f1928374eb5f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java +++
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java @@ -12,12 +12,13 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.List; +import java.util.Set; import java.util.stream.Stream; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore.MAX_ERROR_MESSAGE_LENGTH; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -36,7 +37,7 @@ public void testRecordAndRetrieveError() { assertThat(existingRecordedError, is(nullValue())); assertThat(errorStore.getError("test"), is(notNullValue())); assertThat(errorStore.getAllIndices().size(), is(1)); - assertThat(errorStore.getAllIndices().get(0), is("test")); + assertThat(errorStore.getAllIndices(), hasItem("test")); existingRecordedError = errorStore.recordError("test", new IllegalStateException("bad state")); assertThat(existingRecordedError, is(notNullValue())); @@ -51,7 +52,7 @@ public void testRetrieveAfterClear() { public void testGetAllIndicesIsASnapshotViewOfTheStore() { Stream.iterate(0, i -> i + 1).limit(5).forEach(i -> errorStore.recordError("test" + i, new NullPointerException("testing"))); - List<String> initialAllIndices = errorStore.getAllIndices(); + Set<String> initialAllIndices = errorStore.getAllIndices(); assertThat(initialAllIndices.size(), is(5)); assertThat( initialAllIndices, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java index 5a15e831f5ad6..6833f2222b585 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java @@ -83,16 +83,12 @@ static void putComposableIndexTemplate( ) throws IOException { PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); request.indexTemplate( - new ComposableIndexTemplate( - patterns, - new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), - null, - null, - null, - metadata, - new ComposableIndexTemplate.DataStreamTemplate(), - null - ) + ComposableIndexTemplate.builder() + .indexPatterns(patterns) + .template(new Template(settings, mappings == null ?
null : CompressedXContent.fromJSON(mappings), null, lifecycle)) + .metadata(metadata) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .build() ); assertTrue(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet().isAcknowledged()); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 0ee168d130986..2445e6b0d72df 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -94,6 +94,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; @@ -119,6 +120,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class DataStreamLifecycleServiceTests extends ESTestCase { @@ -280,7 +282,9 @@ public void testRetentionNotExecutedForTSIndicesWithinTimeBounds() { dataStream.isSystem(), dataStream.isAllowCustomRouting(), dataStream.getIndexMode(), - DataStreamLifecycle.newBuilder().dataRetention(0L).build() + DataStreamLifecycle.newBuilder().dataRetention(0L).build(), + dataStream.isFailureStore(), + dataStream.getFailureIndices() ) ); clusterState = ClusterState.builder(clusterState).metadata(builder).build(); @@ -1376,6 +1380,31 @@ public void testTimeSeriesIndicesStillWithinTimeBounds() { } } + public void testTrackingTimeStats() { + AtomicLong now = new AtomicLong(0); + long delta = randomLongBetween(10, 10000); + DataStreamLifecycleService service = new DataStreamLifecycleService( + Settings.EMPTY, + getTransportRequestsRecordingClient(), + clusterService, + Clock.systemUTC(), + threadPool, + () -> now.getAndAdd(delta), + new DataStreamLifecycleErrorStore(() -> Clock.systemUTC().millis()), + mock(AllocationService.class) + ); + assertThat(service.getLastRunDuration(), is(nullValue())); + assertThat(service.getTimeBetweenStarts(), is(nullValue())); + + service.run(ClusterState.EMPTY_STATE); + assertThat(service.getLastRunDuration(), is(delta)); + assertThat(service.getTimeBetweenStarts(), is(nullValue())); + + service.run(ClusterState.EMPTY_STATE); + assertThat(service.getLastRunDuration(), is(delta)); + assertThat(service.getTimeBetweenStarts(), is(2 * delta)); + } + /* * Creates a test cluster state with the given indexName. If customDataStreamLifecycleMetadata is not null, it is added as the value * of the index's custom metadata named "data_stream_lifecycle". 
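Before the wire and XContent tests for the new response below, a minimal usage sketch of the stats action introduced in this change; the client and logger variables are assumed here (any org.elasticsearch.client.internal.Client and log4j Logger would do), while the action, request, and response accessors are the ones defined above:

// Illustrative only: invoking the new stats action and reading its response.
GetDataStreamLifecycleStatsAction.Request request = new GetDataStreamLifecycleStatsAction.Request();
client.execute(GetDataStreamLifecycleStatsAction.INSTANCE, request, ActionListener.wrap(response -> {
    // Both durations are null until the service has completed a run (and started two runs for the interval).
    Long lastRunMillis = response.getRunDuration();
    Long betweenStartsMillis = response.getTimeBetweenStarts();
    logger.info("last run took [{}] ms, [{}] ms between the last two starts", lastRunMillis, betweenStartsMillis);
    for (GetDataStreamLifecycleStatsAction.Response.DataStreamStats stats : response.getDataStreamStats()) {
        // Per data stream: lifecycle-managed backing indices in total, and how many currently sit in the error store.
        logger.info("{}: {} of {} backing indices in error", stats.dataStreamName(), stats.backingIndicesInError(), stats.backingIndicesInTotal());
    }
}, e -> logger.error("failed to collect data stream lifecycle stats", e)));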
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java new file mode 100644 index 0000000000000..111d1b61da8c9 --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.IntStream; + +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class DataStreamLifecycleStatsResponseTests extends AbstractWireSerializingTestCase<GetDataStreamLifecycleStatsAction.Response> { + + @Override + protected GetDataStreamLifecycleStatsAction.Response createTestInstance() { + boolean hasRun = usually(); + var runDuration = hasRun ? randomLongBetween(10, 100000000) : null; + var timeBetweenStarts = hasRun && usually() ? randomLongBetween(10, 100000000) : null; + var dataStreams = IntStream.range(0, randomInt(10)) + .mapToObj( + ignored -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + randomAlphaOfLength(10), + randomIntBetween(1, 1000), + randomIntBetween(0, 100) + ) + ) + .toList(); + return new GetDataStreamLifecycleStatsAction.Response(runDuration, timeBetweenStarts, dataStreams); + } + + @Override + protected GetDataStreamLifecycleStatsAction.Response mutateInstance(GetDataStreamLifecycleStatsAction.Response instance) { + var runDuration = instance.getRunDuration(); + var timeBetweenStarts = instance.getTimeBetweenStarts(); + var dataStreams = instance.getDataStreamStats(); + switch (randomInt(2)) { + case 0 -> runDuration = runDuration != null && randomBoolean() + ? null + : randomValueOtherThan(runDuration, () -> randomLongBetween(10, 100000000)); + case 1 -> timeBetweenStarts = timeBetweenStarts != null && randomBoolean() + ?
null + : randomValueOtherThan(timeBetweenStarts, () -> randomLongBetween(10, 100000000)); + default -> dataStreams = mutateDataStreamStats(dataStreams); + } + return new GetDataStreamLifecycleStatsAction.Response(runDuration, timeBetweenStarts, dataStreams); + } + + private List<GetDataStreamLifecycleStatsAction.Response.DataStreamStats> mutateDataStreamStats( + List<GetDataStreamLifecycleStatsAction.Response.DataStreamStats> dataStreamStats + ) { + // change the stats of a data stream + List<GetDataStreamLifecycleStatsAction.Response.DataStreamStats> mutated = new ArrayList<>(dataStreamStats); + if (randomBoolean() && dataStreamStats.isEmpty() == false) { + int i = randomInt(dataStreamStats.size() - 1); + GetDataStreamLifecycleStatsAction.Response.DataStreamStats instance = dataStreamStats.get(i); + mutated.set(i, switch (randomInt(2)) { + case 0 -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName() + randomAlphaOfLength(2), + instance.backingIndicesInTotal(), + instance.backingIndicesInError() + ); + case 1 -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName(), + instance.backingIndicesInTotal() + randomIntBetween(1, 10), + instance.backingIndicesInError() + ); + default -> new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + instance.dataStreamName(), + instance.backingIndicesInTotal(), + instance.backingIndicesInError() + randomIntBetween(1, 10) + ); + + }); + } else if (dataStreamStats.isEmpty() || randomBoolean()) { + mutated.add( + new GetDataStreamLifecycleStatsAction.Response.DataStreamStats( + randomAlphaOfLength(10), + randomIntBetween(1, 1000), + randomIntBetween(0, 100) + ) + ); + } else { + mutated.remove(randomInt(dataStreamStats.size() - 1)); + } + return mutated; + } + + @SuppressWarnings("unchecked") + public void testXContentSerialization() throws IOException { + GetDataStreamLifecycleStatsAction.Response testInstance = createTestInstance(); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.humanReadable(true); + testInstance.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> { + try { + xcontent.toXContent(builder, EMPTY_PARAMS); + } catch (IOException e) { + logger.error(e.getMessage(), e); + fail(e.getMessage()); + } + }); + Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + if (testInstance.getRunDuration() == null) { + assertThat(xContentMap.get("last_run_duration_in_millis"), nullValue()); + assertThat(xContentMap.get("last_run_duration"), nullValue()); + } else { + assertThat(xContentMap.get("last_run_duration_in_millis"), is(testInstance.getRunDuration().intValue())); + assertThat( + xContentMap.get("last_run_duration"), + is(TimeValue.timeValueMillis(testInstance.getRunDuration()).toHumanReadableString(2)) + ); + } + + if (testInstance.getTimeBetweenStarts() == null) { + assertThat(xContentMap.get("time_between_starts_in_millis"), nullValue()); + assertThat(xContentMap.get("time_between_starts"), nullValue()); + } else { + assertThat(xContentMap.get("time_between_starts_in_millis"), is(testInstance.getTimeBetweenStarts().intValue())); + assertThat( + xContentMap.get("time_between_starts"), + is(TimeValue.timeValueMillis(testInstance.getTimeBetweenStarts()).toHumanReadableString(2)) + ); + } + assertThat(xContentMap.get("data_stream_count"), is(testInstance.getDataStreamStats().size())); + List<Map<String, Object>> dataStreams = (List<Map<String, Object>>) xContentMap.get("data_streams"); + if (testInstance.getDataStreamStats().isEmpty()) { + assertThat(dataStreams.isEmpty(), is(true)); + } else { + assertThat(dataStreams.size(),
is(testInstance.getDataStreamStats().size())); + for (int i = 0; i < dataStreams.size(); i++) { + assertThat(dataStreams.get(i).get("name"), is(testInstance.getDataStreamStats().get(i).dataStreamName())); + assertThat( + dataStreams.get(i).get("backing_indices_in_total"), + is(testInstance.getDataStreamStats().get(i).backingIndicesInTotal()) + ); + assertThat( + dataStreams.get(i).get("backing_indices_in_error"), + is(testInstance.getDataStreamStats().get(i).backingIndicesInError()) + ); + } + } + } + } + + @Override + protected Writeable.Reader<GetDataStreamLifecycleStatsAction.Response> instanceReader() { + return GetDataStreamLifecycleStatsAction.Response::new; + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java new file mode 100644 index 0000000000000..8c423107ea2f4 --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsActionTests.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.action; + +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.Before; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; +import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleFixtures.createDataStream; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportGetDataStreamLifecycleStatsActionTests extends ESTestCase { + + private final DataStreamLifecycleService dataStreamLifecycleService = mock(DataStreamLifecycleService.class); + private final DataStreamLifecycleErrorStore errorStore = mock(DataStreamLifecycleErrorStore.class); + private final TransportGetDataStreamLifecycleStatsAction action = new
TransportGetDataStreamLifecycleStatsAction( + mock(TransportService.class), + mock(ClusterService.class), + mock(ThreadPool.class), + mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), + dataStreamLifecycleService + ); + private Long lastRunDuration; + private Long timeBetweenStarts; + + @Before + public void setUp() throws Exception { + super.setUp(); + lastRunDuration = randomBoolean() ? randomLongBetween(0, 100000) : null; + timeBetweenStarts = randomBoolean() ? randomLongBetween(0, 100000) : null; + when(dataStreamLifecycleService.getLastRunDuration()).thenReturn(lastRunDuration); + when(dataStreamLifecycleService.getTimeBetweenStarts()).thenReturn(timeBetweenStarts); + when(dataStreamLifecycleService.getErrorStore()).thenReturn(errorStore); + when(errorStore.getAllIndices()).thenReturn(Set.of()); + } + + public void testEmptyClusterState() { + GetDataStreamLifecycleStatsAction.Response response = action.collectStats(ClusterState.EMPTY_STATE); + assertThat(response.getRunDuration(), is(lastRunDuration)); + assertThat(response.getTimeBetweenStarts(), is(timeBetweenStarts)); + assertThat(response.getDataStreamStats().isEmpty(), is(true)); + } + + public void testMixedDataStreams() { + Set<String> indicesInError = new HashSet<>(); + int numBackingIndices = 3; + Metadata.Builder builder = Metadata.builder(); + DataStream ilmDataStream = createDataStream( + builder, + "ilm-managed-index", + numBackingIndices, + Settings.builder() + .put(IndexMetadata.LIFECYCLE_NAME, "ILM_policy") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()), + null, + Clock.systemUTC().millis() + ); + builder.put(ilmDataStream); + DataStream dslDataStream = createDataStream( + builder, + "dsl-managed-index", + numBackingIndices, + settings(IndexVersion.current()), + DataStreamLifecycle.newBuilder().dataRetention(TimeValue.timeValueDays(10)).build(), + Clock.systemUTC().millis() + ); + indicesInError.add(dslDataStream.getIndices().get(randomInt(numBackingIndices - 1)).getName()); + builder.put(dslDataStream); + { + String dataStreamName = "mixed"; + final List<Index> backingIndices = new ArrayList<>(); + for (int k = 1; k <= 2; k++) { + IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, k)) + .settings( + Settings.builder() + .put(IndexMetadata.LIFECYCLE_NAME, "ILM_policy") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + ) + .numberOfShards(1) + .numberOfReplicas(1) + .creationDate(Clock.systemUTC().millis()); + + IndexMetadata indexMetadata = indexMetaBuilder.build(); + builder.put(indexMetadata, false); + backingIndices.add(indexMetadata.getIndex()); + } + // DSL managed write index + IndexMetadata.Builder indexMetaBuilder = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 3)) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(1) + .creationDate(Clock.systemUTC().millis()); + MaxAgeCondition rolloverCondition = new MaxAgeCondition(TimeValue.timeValueMillis(Clock.systemUTC().millis() - 2000L)); + indexMetaBuilder.putRolloverInfo( + new RolloverInfo(dataStreamName, List.of(rolloverCondition), Clock.systemUTC().millis() - 2000L) + ); + IndexMetadata indexMetadata = indexMetaBuilder.build(); + builder.put(indexMetadata, false); + backingIndices.add(indexMetadata.getIndex()); + builder.put(newInstance(dataStreamName, backingIndices, 3, null, false, DataStreamLifecycle.newBuilder().build())); + } +
ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); + when(errorStore.getAllIndices()).thenReturn(indicesInError); + GetDataStreamLifecycleStatsAction.Response response = action.collectStats(state); + assertThat(response.getRunDuration(), is(lastRunDuration)); + assertThat(response.getTimeBetweenStarts(), is(timeBetweenStarts)); + assertThat(response.getDataStreamStats().size(), is(2)); + for (GetDataStreamLifecycleStatsAction.Response.DataStreamStats stats : response.getDataStreamStats()) { + if (stats.dataStreamName().equals("dsl-managed-index")) { + assertThat(stats.backingIndicesInTotal(), is(3)); + assertThat(stats.backingIndicesInError(), is(1)); + } + if (stats.dataStreamName().equals("mixed")) { + assertThat(stats.backingIndicesInTotal(), is(1)); + assertThat(stats.backingIndicesInError(), is(0)); + } + } + } +} diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index b420e8421bfba..6496930764ab8 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -206,6 +206,103 @@ setup: - do: indices.delete_index_template: name: my-template3 + +--- +"Create data stream with failure store": + - skip: + version: " - 8.10.99" + reason: "data stream failure stores only creatable in 8.11+" + + - do: + allowed_warnings: + - "index template [my-template4] has index patterns [failure-data-stream1, failure-data-stream2] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template4] will take precedence during new index creation" + indices.put_index_template: + name: my-template4 + body: + index_patterns: [ failure-data-stream1, failure-data-stream2 ] + data_stream: + failure_store: true + + - do: + indices.create_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: failure-data-stream2 + - is_true: acknowledged + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_data_stream: + name: "*" + - match: { data_streams.0.name: failure-data-stream1 } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - match: { data_streams.0.generation: 1 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.status: 'GREEN' } + - match: { data_streams.0.template: 'my-template4' } + - match: { data_streams.0.hidden: false } + - match: { data_streams.0.failure_store: true } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/'} + + - match: { data_streams.1.name: failure-data-stream2 } + - match: { data_streams.1.timestamp_field.name: '@timestamp' } + - match: { data_streams.1.generation: 1 } + - length: { data_streams.1.indices: 1 } + - match: { data_streams.1.indices.0.index_name: '/\.ds-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.1.template: 'my-template4' } + - match: { data_streams.1.hidden: false } + - match: { data_streams.1.failure_store: true } + - length: { data_streams.1.failure_indices: 1 } + - match: { data_streams.1.failure_indices.0.index_name: 
'/\.fs-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + # save the backing index names for later use + - set: { data_streams.0.indices.0.index_name: idx0name } + - set: { data_streams.0.failure_indices.0.index_name: fsidx0name } + - set: { data_streams.1.indices.0.index_name: idx1name } + - set: { data_streams.1.failure_indices.0.index_name: fsidx1name } + + - do: + indices.get_mapping: + index: $idx0name + expand_wildcards: hidden + - match: { .$idx0name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $fsidx0name + expand_wildcards: hidden + - match: { .$fsidx0name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $idx1name + expand_wildcards: hidden + - match: { .$idx1name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.get_mapping: + index: $fsidx1name + expand_wildcards: hidden + - match: { .$fsidx1name.mappings.properties.@timestamp.type: 'date' } + + - do: + indices.delete_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + indices.delete_data_stream: + name: failure-data-stream2 + - is_true: acknowledged + --- "Create data stream with invalid name": - skip: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml index 2a6beb4330e68..303a584555f8f 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml @@ -46,3 +46,56 @@ indices.delete_data_stream: name: logs-foobar - is_true: acknowledged + +--- +"Put index template with failure store": + - skip: + version: " - 8.10.99" + reason: "data stream failure stores only creatable in 8.11+" + features: allowed_warnings + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + - do: + search: + index: logs-foobar + body: { query: { match_all: {} } } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.ds-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - match: { hits.hits.0._source.foo: 'bar' } + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store: true } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged diff --git a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java 
b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index 362c0c1887261..48cb155ac2970 100644 --- a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -14,10 +14,10 @@ import org.apache.tika.metadata.Office; import org.apache.tika.metadata.TikaCoreProperties; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; @@ -226,15 +226,6 @@ public static final class Factory implements Processor.Factory { static final Set<Property> DEFAULT_PROPERTIES = EnumSet.allOf(Property.class); - static { - if (Version.CURRENT.major >= 9) { - throw new IllegalStateException( - "[poison pill] update the [remove_binary] default to be 'true' assuming " - + "enough time has passed. Deprecated in September 2022." - ); - } - } - @Override public AttachmentProcessor create( Map<String, Processor.Factory> registry, @@ -249,6 +240,7 @@ public AttachmentProcessor create( int indexedChars = readIntProperty(TYPE, processorTag, config, "indexed_chars", NUMBER_OF_CHARS_INDEXED); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); String indexedCharsField = readOptionalStringProperty(TYPE, processorTag, config, "indexed_chars_field"); + @UpdateForV9 // update the [remove_binary] default to be 'true' assuming enough time has passed. Deprecated in September 2022.
Boolean removeBinary = readOptionalBooleanProperty(TYPE, processorTag, config, "remove_binary"); if (removeBinary == null) { DEPRECATION_LOGGER.warn( diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 5709fbd9d8bfc..0ff34cf687500 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -88,8 +88,7 @@ public void testFailureInConditionalProcessor() { Exception e = expectThrows( Exception.class, - () -> client().prepareIndex("index") - .setId("1") + () -> prepareIndex("index").setId("1") .setSource("x", 0) .setPipeline(pipelineId) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -144,8 +143,7 @@ public Settings onNodeStopped(String nodeName) { checkPipelineExists.accept(pipelineIdWithoutScript); checkPipelineExists.accept(pipelineIdWithScript); - client().prepareIndex("index") - .setId("1") + prepareIndex("index").setId("1") .setSource("x", 0) .setPipeline(pipelineIdWithoutScript) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -153,8 +151,7 @@ public Settings onNodeStopped(String nodeName) { IllegalStateException exception = expectThrows( IllegalStateException.class, - () -> client().prepareIndex("index") - .setId("2") + () -> prepareIndex("index").setId("2") .setSource("x", 0) .setPipeline(pipelineIdWithScript) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -194,12 +191,7 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio }"""); clusterAdmin().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); - client().prepareIndex("index") - .setId("1") - .setSource("x", 0) - .setPipeline("_id") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("index").setId("1").setSource("x", 0).setPipeline("_id").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); Map<String, Object> source = client().prepareGet("index", "1").get().getSource(); assertThat(source.get("x"), equalTo(0)); @@ -213,12 +205,7 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio internalCluster().fullRestart(); ensureYellow("index"); - client().prepareIndex("index") - .setId("2") - .setSource("x", 0) - .setPipeline("_id") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("index").setId("2").setSource("x", 0).setPipeline("_id").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); source = client().prepareGet("index", "2").get().getSource(); assertThat(source.get("x"), equalTo(0)); @@ -238,12 +225,7 @@ public void testWithDedicatedIngestNode() throws Exception { }"""); clusterAdmin().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); - client().prepareIndex("index") - .setId("1") - .setSource("x", 0) - .setPipeline("_id") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("index").setId("1").setSource("x", 0).setPipeline("_id").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); Map<String, Object> source = client().prepareGet("index", "1").get().getSource(); assertThat(source.get("x"), equalTo(0)); @@ -304,8 +286,7 @@ public boolean validateClusterForming() { assertThat( expectThrows( ClusterBlockException.class, - () -> client().prepareIndex("index") - .setId("fails") + () ->
prepareIndex("index").setId("fails") .setSource("x", 1) .setTimeout(TimeValue.timeValueMillis(100)) // 100ms, to fail quickly .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -316,8 +297,7 @@ public boolean validateClusterForming() { // but this one should pass since it has a longer timeout final PlainActionFuture future = new PlainActionFuture<>(); - client().prepareIndex("index") - .setId("passes1") + prepareIndex("index").setId("passes1") .setSource("x", 2) .setTimeout(TimeValue.timeValueSeconds(60)) // wait for second node to start in below .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) @@ -331,7 +311,7 @@ public boolean validateClusterForming() { assertThat(indexResponse.status(), equalTo(RestStatus.CREATED)); assertThat(indexResponse.getResult(), equalTo(DocWriteResponse.Result.CREATED)); - client().prepareIndex("index").setId("passes2").setSource("x", 3).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + prepareIndex("index").setId("passes2").setSource("x", 3).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); // successfully indexed documents should have the value field set by the pipeline Map source = client().prepareGet("index", "passes1").get(timeout).getSource(); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml index 191b92806b6ce..e2f4e32777a1f 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml @@ -23,6 +23,9 @@ teardown: --- "Test first matching router terminates pipeline": + - skip: + version: all + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102144" - do: ingest.put_pipeline: id: "pipeline-with-two-data-stream-processors" diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 9a739132e5808..438b5f3f5efcd 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; @@ -69,6 +68,7 @@ import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -250,35 +250,43 @@ public void testGeoIpDatabasesDownload() throws Exception { state.getDatabases().keySet() ); GeoIpTaskState.Metadata metadata = state.get(id); - BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(new MatchQueryBuilder("name", 
id))
-            .filter(new RangeQueryBuilder("chunk").from(metadata.firstChunk()).to(metadata.lastChunk(), true));
         int size = metadata.lastChunk() - metadata.firstChunk() + 1;
-        SearchResponse res = prepareSearch(GeoIpDownloader.DATABASES_INDEX).setSize(size)
-            .setQuery(queryBuilder)
-            .addSort("chunk", SortOrder.ASC)
-            .get();
-        TotalHits totalHits = res.getHits().getTotalHits();
-        assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation);
-        assertEquals(size, totalHits.value);
-        assertEquals(size, res.getHits().getHits().length);
-
-        List<byte[]> data = new ArrayList<>();
-
-        for (SearchHit hit : res.getHits().getHits()) {
-            data.add((byte[]) hit.getSourceAsMap().get("data"));
-        }
-
-        TarInputStream stream = new TarInputStream(new GZIPInputStream(new MultiByteArrayInputStream(data)));
-        TarInputStream.TarEntry entry;
-        while ((entry = stream.getNextEntry()) != null) {
-            if (entry.name().endsWith(".mmdb")) {
-                break;
+        assertResponse(
+            prepareSearch(GeoIpDownloader.DATABASES_INDEX).setSize(size)
+                .setQuery(
+                    new BoolQueryBuilder().filter(new MatchQueryBuilder("name", id))
+                        .filter(new RangeQueryBuilder("chunk").from(metadata.firstChunk()).to(metadata.lastChunk(), true))
+                )
+                .addSort("chunk", SortOrder.ASC),
+            res -> {
+                try {
+                    TotalHits totalHits = res.getHits().getTotalHits();
+                    assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation);
+                    assertEquals(size, totalHits.value);
+                    assertEquals(size, res.getHits().getHits().length);
+
+                    List<byte[]> data = new ArrayList<>();
+
+                    for (SearchHit hit : res.getHits().getHits()) {
+                        data.add((byte[]) hit.getSourceAsMap().get("data"));
+                    }
+
+                    TarInputStream stream = new TarInputStream(new GZIPInputStream(new MultiByteArrayInputStream(data)));
+                    TarInputStream.TarEntry entry;
+                    while ((entry = stream.getNextEntry()) != null) {
+                        if (entry.name().endsWith(".mmdb")) {
+                            break;
+                        }
+                    }
+
+                    Path tempFile = createTempFile();
+                    Files.copy(stream, tempFile, StandardCopyOption.REPLACE_EXISTING);
+                    parseDatabase(tempFile);
+                } catch (Exception e) {
+                    fail(e);
+                }
             }
-        }
-
-        Path tempFile = createTempFile();
-        Files.copy(stream, tempFile, StandardCopyOption.REPLACE_EXISTING);
-        parseDatabase(tempFile);
+        );
     } catch (Exception e) {
         throw new AssertionError(e);
     }
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java
index 76c0e6e494a74..3e04f7bfea2de 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java
@@ -215,7 +215,7 @@ protected void updateTimestamp(String name, Metadata old) {
     }

     void updateTaskState() {
-        PlainActionFuture<PersistentTask<?>> future = PlainActionFuture.newFuture();
+        PlainActionFuture<PersistentTask<?>> future = new PlainActionFuture<>();
         updatePersistentTaskState(state, future);
         state = ((GeoIpTaskState) future.actionGet().getState());
     }
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
index 8534749cace61..1f170e0f796ff 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
@@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ResourceAlreadyExistsException;
 import org.elasticsearch.ResourceNotFoundException;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.OriginSettingClient;
@@ -197,8 +196,8 @@ public void clusterChanged(ClusterChangedEvent event) {
         }
         DiscoveryNode masterNode = event.state().nodes().getMasterNode();
-        if (masterNode == null || masterNode.getVersion().before(Version.V_7_14_0)) {
-            // wait for master to be upgraded so it understands geoip task
+        if (masterNode == null) {
+            // no master yet
             return;
         }
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java
index 26ddbaa7ba854..30ecc96a3171c 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java
@@ -9,7 +9,6 @@ package org.elasticsearch.ingest.geoip;
 import org.apache.lucene.util.SetOnce;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.client.internal.Client;
@@ -65,6 +64,14 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemIndexPlugin, Closeable, PersistentTaskPlugin, ActionPlugin {
     public static final Setting<Long> CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope);
     private static final int GEOIP_INDEX_MAPPINGS_VERSION = 1;
+    /**
+     * No longer used for determining the age of mappings, but system index descriptor
+     * code requires something be set. We use a value that can be parsed by
+     * old nodes in mixed-version clusters, just in case any old code exists that
+     * tries to parse version from index metadata, and that will indicate
+     * to these old nodes that the mappings are newer than they are.
+     */
+    private static final String LEGACY_VERSION_FIELD_VALUE = "8.12.0";
     private final SetOnce<IngestService> ingestService = new SetOnce<>();
     private final SetOnce<DatabaseNodeService> databaseRegistry = new SetOnce<>();
@@ -204,7 +211,7 @@ private static XContentBuilder mappings() {
         return jsonBuilder().startObject()
             .startObject(SINGLE_MAPPING_NAME)
             .startObject("_meta")
-            .field("version", Version.CURRENT)
+            .field("version", LEGACY_VERSION_FIELD_VALUE)
             .field(SystemIndexDescriptor.VERSION_META_KEY, GEOIP_INDEX_MAPPINGS_VERSION)
             .endObject()
             .field("dynamic", "strict")
diff --git a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java
index f71a55f4f6be0..69e33863b0f2b 100644
--- a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java
+++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java
@@ -10,7 +10,6 @@
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.action.update.UpdateRequestBuilder;
 import org.elasticsearch.common.lucene.search.function.CombineFunction;
@@ -37,6 +36,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
 import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
@@ -44,6 +44,8 @@
 import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketScript;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -76,29 +78,32 @@ private SearchRequestBuilder buildRequest(String script, Object...
params) { public void testBasic() throws Exception { createIndex("test"); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'] + 1").get(); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); + assertResponse(buildRequest("doc['foo'] + 1"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testFunction() throws Exception { createIndex("test"); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'] + abs(1)").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); + assertNoFailuresAndResponse(buildRequest("doc['foo'] + abs(1)"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testBasicUsingDotValue() throws Exception { createIndex("test"); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'].value + 1").get(); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + + prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); + assertResponse(buildRequest("doc['foo'].value + 1"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testScore() throws Exception { @@ -106,9 +111,9 @@ public void testScore() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("text", "hello goodbye"), - client().prepareIndex("test").setId("2").setSource("text", "hello hello hello goodbye"), - client().prepareIndex("test").setId("3").setSource("text", "hello hello goodebye") + prepareIndex("test").setId("1").setSource("text", "hello goodbye"), + prepareIndex("test").setId("2").setSource("text", "hello hello hello goodbye"), + prepareIndex("test").setId("3").setSource("text", "hello hello goodebye") ); ScriptScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction( new Script(ScriptType.INLINE, "expression", "1 / _score", Collections.emptyMap()) @@ -116,13 +121,14 @@ public void testScore() throws Exception { SearchRequestBuilder req = prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent - SearchResponse rsp = req.get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals("1", hits.getAt(0).getId()); - assertEquals("3", hits.getAt(1).getId()); - assertEquals("2", hits.getAt(2).getId()); + assertResponse(req, rsp -> { + assertNoFailures(rsp); + SearchHits hits = 
rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals("1", hits.getAt(0).getId()); + assertEquals("3", hits.getAt(1).getId()); + assertEquals("2", hits.getAt(2).getId()); + }); req = prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); @@ -137,29 +143,33 @@ public void testDateMethods() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), - client().prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") + prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), + prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") ); - SearchResponse rsp = buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - SearchHits hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].getMonth() + 1").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(9.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(10.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].getYear()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].getMonth() + 1"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(9.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(10.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].getYear()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testDateObjectMethods() throws Exception { @@ -167,29 +177,33 @@ public void testDateObjectMethods() throws Exception { 
ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), - client().prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") + prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), + prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") ); - SearchResponse rsp = buildRequest("doc['date0'].date.secondOfMinute - doc['date0'].date.minuteOfHour").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - SearchHits hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date0'].date.getHourOfDay() + doc['date1'].date.dayOfMonth").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].date.monthOfYear + 1").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(10.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].date.year").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['date0'].date.secondOfMinute - doc['date0'].date.minuteOfHour"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date0'].date.getHourOfDay() + doc['date1'].date.dayOfMonth"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].date.monthOfYear + 1"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(10.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].date.year"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testMultiValueMethods() throws Exception { @@ -216,90 +230,90 @@ public void testMultiValueMethods() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource(doc1), - client().prepareIndex("test").setId("2").setSource(doc2), - client().prepareIndex("test").setId("3").setSource(doc3) + prepareIndex("test").setId("1").setSource(doc1), + prepareIndex("test").setId("2").setSource(doc2), + prepareIndex("test").setId("3").setSource(doc3) ); - SearchResponse rsp = 
buildRequest("doc['double0'].count() + doc['double1'].count()").get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(2.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].sum()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(7.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(6.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].avg() + doc['double1'].avg()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(4.3, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(8.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.5, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].median()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(1.25, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].min()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(-1.5, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].max()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].sum()/doc['double0'].count()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(2.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double0'].count() + doc['double1'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(2.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].sum()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(7.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(6.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].avg() + doc['double1'].avg()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(4.3, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(8.0, 
hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].median()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(1.25, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].min()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(-1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].max()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].sum()/doc['double0'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(2.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); // make sure count() works for missing - rsp = buildRequest("doc['double2'].count()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(0.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(0.0, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double2'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(0.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(0.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); // make sure .empty works in the same way - rsp = buildRequest("doc['double2'].empty ? 5.0 : 2.0").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(2.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double2'].empty ? 
5.0 : 2.0"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(2.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); } public void testInvalidDateMethodCall() throws Exception { ElasticsearchAssertions.assertAcked(prepareCreate("test").setMapping("double", "type=double")); ensureGreen("test"); - indexRandom(true, client().prepareIndex("test").setId("1").setSource("double", "178000000.0")); + indexRandom(true, prepareIndex("test").setId("1").setSource("double", "178000000.0")); try { buildRequest("doc['double'].getYear()").get(); fail(); @@ -322,21 +336,21 @@ public void testSparseField() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "x", 4), - client().prepareIndex("test").setId("2").setSource("id", 2, "y", 2) + prepareIndex("test").setId("1").setSource("id", 1, "x", 4), + prepareIndex("test").setId("2").setSource("id", 2, "y", 2) ); - SearchResponse rsp = buildRequest("doc['x'] + 1").get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(2, rsp.getHits().getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['x'] + 1"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(2, rsp.getHits().getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(1.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testMissingField() throws Exception { createIndex("test"); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("x", 4).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("x", 4).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("doc['bogus']").get(); fail("Expected missing field to cause failure"); @@ -355,22 +369,23 @@ public void testParams() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "x", 10), - client().prepareIndex("test").setId("2").setSource("id", 2, "x", 3), - client().prepareIndex("test").setId("3").setSource("id", 3, "x", 5) + prepareIndex("test").setId("1").setSource("id", 1, "x", 10), + prepareIndex("test").setId("2").setSource("id", 2, "x", 3), + prepareIndex("test").setId("3").setSource("id", 3, "x", 5) ); // a = int, b = double, c = long String script = "doc['x'] * a + b + ((c + doc['x']) > 5000000009 ? 
1 : 0)"; - SearchResponse rsp = buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L).get(); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(24.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(9.5, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(13.5, hits.getAt(2).field("foo").getValue(), 0.0D); + assertResponse(buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(24.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(9.5, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(13.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); } public void testCompileFailure() { - client().prepareIndex("test").setId("1").setSource("x", 1).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("x", 1).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("garbage%@#%@").get(); fail("Expected expression compilation failure"); @@ -381,7 +396,7 @@ public void testCompileFailure() { } public void testNonNumericParam() { - client().prepareIndex("test").setId("1").setSource("x", 1).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("x", 1).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("a", "a", "astring").get(); fail("Expected string parameter to cause failure"); @@ -396,7 +411,7 @@ public void testNonNumericParam() { } public void testNonNumericField() { - client().prepareIndex("test").setId("1").setSource("text", "this is not a number").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("text", "this is not a number").setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("doc['text.keyword']").get(); fail("Expected text field to cause execution failure"); @@ -411,7 +426,7 @@ public void testNonNumericField() { } public void testInvalidGlobalVariable() { - client().prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("bogus").get(); fail("Expected bogus variable to cause execution failure"); @@ -426,7 +441,7 @@ public void testInvalidGlobalVariable() { } public void testDocWithoutField() { - client().prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("doc").get(); fail("Expected doc variable without field to cause execution failure"); @@ -441,7 +456,7 @@ public void testDocWithoutField() { } public void testInvalidFieldMember() { - client().prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("foo", 5).setRefreshPolicy(IMMEDIATE).get(); try { buildRequest("doc['foo'].bogus").get(); fail("Expected bogus field member to cause execution failure"); @@ -461,9 +476,9 @@ public void testSpecialValueVariable() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("x", 5, "y", 1.2), - client().prepareIndex("test").setId("2").setSource("x", 10, "y", 1.4), - client().prepareIndex("test").setId("3").setSource("x", 13, "y", 1.8) + prepareIndex("test").setId("1").setSource("x", 5, "y", 1.2), + prepareIndex("test").setId("2").setSource("x", 10, "y", 1.4), + prepareIndex("test").setId("3").setSource("x", 13, "y", 
1.8) ); SearchRequestBuilder req = prepareSearch().setIndices("test"); @@ -484,21 +499,22 @@ public void testSpecialValueVariable() throws Exception { .script(new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "3.0", Collections.emptyMap())) ); - SearchResponse rsp = req.get(); - assertEquals(3, rsp.getHits().getTotalHits().value); + assertResponse(req, rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); - Stats stats = rsp.getAggregations().get("int_agg"); - assertEquals(39.0, stats.getMax(), 0.0001); - assertEquals(15.0, stats.getMin(), 0.0001); + Stats stats = rsp.getAggregations().get("int_agg"); + assertEquals(39.0, stats.getMax(), 0.0001); + assertEquals(15.0, stats.getMin(), 0.0001); - stats = rsp.getAggregations().get("double_agg"); - assertEquals(0.7, stats.getMax(), 0.0001); - assertEquals(0.1, stats.getMin(), 0.0001); + stats = rsp.getAggregations().get("double_agg"); + assertEquals(0.7, stats.getMax(), 0.0001); + assertEquals(0.1, stats.getMin(), 0.0001); - stats = rsp.getAggregations().get("const_agg"); - assertThat(stats.getMax(), equalTo(3.0)); - assertThat(stats.getMin(), equalTo(3.0)); - assertThat(stats.getAvg(), equalTo(3.0)); + stats = rsp.getAggregations().get("const_agg"); + assertThat(stats.getMax(), equalTo(3.0)); + assertThat(stats.getMin(), equalTo(3.0)); + assertThat(stats.getAvg(), equalTo(3.0)); + }); } public void testStringSpecialValueVariable() throws Exception { @@ -507,9 +523,9 @@ public void testStringSpecialValueVariable() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("text", "hello"), - client().prepareIndex("test").setId("2").setSource("text", "goodbye"), - client().prepareIndex("test").setId("3").setSource("text", "hello") + prepareIndex("test").setId("1").setSource("text", "hello"), + prepareIndex("test").setId("2").setSource("text", "goodbye"), + prepareIndex("test").setId("3").setSource("text", "hello") ); SearchRequestBuilder req = prepareSearch().setIndices("test"); @@ -520,18 +536,19 @@ public void testStringSpecialValueVariable() throws Exception { .script(new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "_value", Collections.emptyMap())) ); - String message; + AtomicReference message = new AtomicReference<>(); try { // shards that don't have docs with the "text" field will not fail, // so we may or may not get a total failure - SearchResponse rsp = req.get(); - assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed - message = rsp.getShardFailures()[0].reason(); + assertResponse(req, rsp -> { + assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed + message.set(rsp.getShardFailures()[0].reason()); + }); } catch (SearchPhaseExecutionException e) { - message = e.toString(); + message.set(e.toString()); } - assertThat(message + "should have contained ScriptException", message.contains("ScriptException"), equalTo(true)); - assertThat(message + "should have contained text variable error", message.contains("text variable"), equalTo(true)); + assertThat(message + "should have contained ScriptException", message.get().contains("ScriptException"), equalTo(true)); + assertThat(message + "should have contained text variable error", message.get().contains("text variable"), equalTo(true)); } // test to make sure expressions are not allowed to be used as update scripts @@ -539,7 +556,7 @@ public void testInvalidUpdateScript() throws 
Exception { try { createIndex("test_index"); ensureGreen("test_index"); - indexRandom(true, client().prepareIndex("test_index").setId("1").setSource("text_field", "text")); + indexRandom(true, prepareIndex("test_index").setId("1").setSource("text_field", "text")); UpdateRequestBuilder urb = client().prepareUpdate().setIndex("test_index"); urb.setId("1"); urb.setScript(new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "0", Collections.emptyMap())); @@ -559,50 +576,58 @@ public void testPipelineAggregationScript() throws Exception { ensureGreen("agg_index"); indexRandom( true, - client().prepareIndex("agg_index").setId("1").setSource("one", 1.0, "two", 2.0, "three", 3.0, "four", 4.0), - client().prepareIndex("agg_index").setId("2").setSource("one", 2.0, "two", 2.0, "three", 3.0, "four", 4.0), - client().prepareIndex("agg_index").setId("3").setSource("one", 3.0, "two", 2.0, "three", 3.0, "four", 4.0), - client().prepareIndex("agg_index").setId("4").setSource("one", 4.0, "two", 2.0, "three", 3.0, "four", 4.0), - client().prepareIndex("agg_index").setId("5").setSource("one", 5.0, "two", 2.0, "three", 3.0, "four", 4.0) + prepareIndex("agg_index").setId("1").setSource("one", 1.0, "two", 2.0, "three", 3.0, "four", 4.0), + prepareIndex("agg_index").setId("2").setSource("one", 2.0, "two", 2.0, "three", 3.0, "four", 4.0), + prepareIndex("agg_index").setId("3").setSource("one", 3.0, "two", 2.0, "three", 3.0, "four", 4.0), + prepareIndex("agg_index").setId("4").setSource("one", 4.0, "two", 2.0, "three", 3.0, "four", 4.0), + prepareIndex("agg_index").setId("5").setSource("one", 5.0, "two", 2.0, "three", 3.0, "four", 4.0) ); - SearchResponse response = prepareSearch("agg_index").addAggregation( - histogram("histogram").field("one") - .interval(2) - .subAggregation(sum("twoSum").field("two")) - .subAggregation(sum("threeSum").field("three")) - .subAggregation(sum("fourSum").field("four")) - .subAggregation( - bucketScript( - "totalSum", - new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), - "twoSum", - "threeSum", - "fourSum" + assertResponse( + prepareSearch("agg_index").addAggregation( + histogram("histogram").field("one") + .interval(2) + .subAggregation(sum("twoSum").field("two")) + .subAggregation(sum("threeSum").field("three")) + .subAggregation(sum("fourSum").field("four")) + .subAggregation( + bucketScript( + "totalSum", + new Script( + ScriptType.INLINE, + ExpressionScriptEngine.NAME, + "_value0 + _value1 + _value2", + Collections.emptyMap() + ), + "twoSum", + "threeSum", + "fourSum" + ) ) - ) - ).execute().actionGet(); - - Histogram histogram = response.getAggregations().get("histogram"); - assertThat(histogram, notNullValue()); - assertThat(histogram.getName(), equalTo("histogram")); - List buckets = histogram.getBuckets(); - - for (int bucketCount = 0; bucketCount < buckets.size(); ++bucketCount) { - Histogram.Bucket bucket = buckets.get(bucketCount); - if (bucket.getDocCount() == 1) { - SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); - assertThat(seriesArithmetic, notNullValue()); - double seriesArithmeticValue = seriesArithmetic.value(); - assertEquals(9.0, seriesArithmeticValue, 0.001); - } else if (bucket.getDocCount() == 2) { - SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); - assertThat(seriesArithmetic, notNullValue()); - double seriesArithmeticValue = seriesArithmetic.value(); - assertEquals(18.0, seriesArithmeticValue, 0.001); - } else { - fail("Incorrect number 
of documents in a bucket in the histogram."); + ), + response -> { + Histogram histogram = response.getAggregations().get("histogram"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histogram")); + List buckets = histogram.getBuckets(); + + for (int bucketCount = 0; bucketCount < buckets.size(); ++bucketCount) { + Histogram.Bucket bucket = buckets.get(bucketCount); + if (bucket.getDocCount() == 1) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertEquals(9.0, seriesArithmeticValue, 0.001); + } else if (bucket.getDocCount() == 2) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertEquals(18.0, seriesArithmeticValue, 0.001); + } else { + fail("Incorrect number of documents in a bucket in the histogram."); + } + } } - } + ); } public void testGeo() throws Exception { @@ -615,8 +640,7 @@ public void testGeo() throws Exception { xContentBuilder.endObject().endObject().endObject().endObject(); assertAcked(prepareCreate("test").setMapping(xContentBuilder)); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("name", "test") @@ -626,29 +650,28 @@ public void testGeo() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); refresh(); // access .lat - SearchResponse rsp = buildRequest("doc['location'].lat").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(61.5240, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].lat"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(61.5240, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // access .lon - rsp = buildRequest("doc['location'].lon").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(105.3188, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].lon"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(105.3188, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // access .empty - rsp = buildRequest("doc['location'].empty ? 1 : 0").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(0, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].empty ? 
1 : 0"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(0, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // call haversin - rsp = buildRequest("haversin(38.9072, 77.0369, doc['location'].lat, doc['location'].lon)").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(3170D, rsp.getHits().getAt(0).field("foo").getValue(), 50D); + assertNoFailuresAndResponse(buildRequest("haversin(38.9072, 77.0369, doc['location'].lat, doc['location'].lon)"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(3170D, rsp.getHits().getAt(0).field("foo").getValue(), 50D); + }); } public void testBoolean() throws Exception { @@ -663,32 +686,32 @@ public void testBoolean() throws Exception { ensureGreen(); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "price", 1.0, "vip", true), - client().prepareIndex("test").setId("2").setSource("id", 2, "price", 2.0, "vip", false), - client().prepareIndex("test").setId("3").setSource("id", 3, "price", 2.0, "vip", false) + prepareIndex("test").setId("1").setSource("id", 1, "price", 1.0, "vip", true), + prepareIndex("test").setId("2").setSource("id", 2, "price", 2.0, "vip", false), + prepareIndex("test").setId("3").setSource("id", 3, "price", 2.0, "vip", false) ); // access .value - SearchResponse rsp = buildRequest("doc['vip'].value").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'].value"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); // access .empty - rsp = buildRequest("doc['vip'].empty ? 1 : 0").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(0.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(1.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'].empty ? 1 : 0"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(0.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(1.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); // ternary operator // vip's have a 50% discount - rsp = buildRequest("doc['vip'] ? doc['price']/2 : doc['price']").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(0.5D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(2.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(2.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'] ? 
doc['price']/2 : doc['price']"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(0.5D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(2.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(2.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); } public void testFilterScript() throws Exception { @@ -696,15 +719,15 @@ public void testFilterScript() throws Exception { ensureGreen("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("id", 1, "foo", 1.0), - client().prepareIndex("test").setId("2").setSource("id", 2, "foo", 0.0) + prepareIndex("test").setId("1").setSource("id", 1, "foo", 1.0), + prepareIndex("test").setId("2").setSource("id", 2, "foo", 0.0) ); SearchRequestBuilder builder = buildRequest("doc['foo'].value"); Script script = new Script(ScriptType.INLINE, "expression", "doc['foo'].value", Collections.emptyMap()); builder.setQuery(QueryBuilders.boolQuery().filter(QueryBuilders.scriptQuery(script))); - SearchResponse rsp = builder.get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(builder, rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } } diff --git a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java index dcf380d338c14..121a6b01ea792 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java @@ -41,7 +41,7 @@ protected Collection> nodePlugins() { public void testAllOpsDisabledIndexedScripts() throws IOException { clusterAdmin().preparePutStoredScript().setId("script1").setContent(new BytesArray(""" {"script": {"lang": "expression", "source": "2"} }"""), XContentType.JSON).get(); - client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON).get(); try { client().prepareUpdate("test", "1").setScript(new Script(ScriptType.STORED, null, "script1", Collections.emptyMap())).get(); fail("update script should have been rejected"); diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index 000728209456f..d9e346454aefe 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -59,9 +59,7 @@ public void testBasic() throws Exception { final int numDocs = randomIntBetween(10, 100); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = client().prepareIndex("msearch") - .setId(String.valueOf(i)) - .setSource("odd", (i % 2 == 0), "group", (i % 3)); + indexRequestBuilders[i] = 
prepareIndex("msearch").setId(String.valueOf(i)).setSource("odd", (i % 2 == 0), "group", (i % 3)); } indexRandom(true, indexRequestBuilders); diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index 517828cbeba3c..77480e6bc9e63 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -55,8 +55,8 @@ protected Settings nodeSettings() { @Before public void setup() throws IOException { createIndex("test"); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "value1").endObject()).get(); - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("text", "value2").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "value1").endObject()).get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("text", "value2").endObject()).get(); indicesAdmin().prepareRefresh().get(); } @@ -166,11 +166,11 @@ public void testIndexedTemplateClient() throws Exception { assertNotNull(getResponse.getSource()); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); bulkRequestBuilder.get(); indicesAdmin().prepareRefresh().get(); @@ -263,11 +263,11 @@ public void testIndexedTemplate() throws Exception { assertAcked(clusterAdmin().preparePutStoredScript().setId("3").setContent(new BytesArray(script), XContentType.JSON)); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - 
bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); bulkRequestBuilder.get(); indicesAdmin().prepareRefresh().get(); @@ -304,7 +304,7 @@ public void testIndexedTemplateOverwrite() throws Exception { createIndex("testindex"); ensureGreen("testindex"); - client().prepareIndex("testindex").setId("1").setSource(jsonBuilder().startObject().field("searchtext", "dev1").endObject()).get(); + prepareIndex("testindex").setId("1").setSource(jsonBuilder().startObject().field("searchtext", "dev1").endObject()).get(); indicesAdmin().prepareRefresh().get(); int iterations = randomIntBetween(2, 11); @@ -382,11 +382,11 @@ public void testIndexedTemplateWithArray() throws Exception { }"""; assertAcked(clusterAdmin().preparePutStoredScript().setId("4").setContent(new BytesArray(multiQuery), XContentType.JSON)); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); bulkRequestBuilder.get(); indicesAdmin().prepareRefresh().get(); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java index d859fb509e915..4b0c365ba8b13 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java @@ -8,6 +8,9 @@ package org.elasticsearch.script.mustache; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; @@ -31,6 +34,8 @@ public class TransportMultiSearchTemplateAction extends HandledTransportAction<MultiSearchTemplateRequest, MultiSearchTemplateResponse> { + private static final Logger logger = LogManager.getLogger(TransportMultiSearchTemplateAction.class); + private final ScriptService scriptService; private final NamedXContentRegistry xContentRegistry; private final NodeClient client; @@ -76,6 +81,9 @@ protected void doExecute(Task task, MultiSearchTemplateRequest request, ActionLi searchRequest = convert(searchTemplateRequest, searchTemplateResponse, scriptService, xContentRegistry, searchUsageHolder); } catch (Exception e) { items[i] = new MultiSearchTemplateResponse.Item(null, e); + if (ExceptionsHelper.status(e).getStatus() >= 500 && ExceptionsHelper.isNodeOrShardUnavailableTypeException(e) == false) { + logger.warn("MultiSearchTemplate convert failure", e); + } continue; } items[i] = new MultiSearchTemplateResponse.Item(searchTemplateResponse, null); diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index 30937ebcbd773..1fcf776ac8428 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -8,7 +8,6 @@ package org.elasticsearch.painless.spi; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -47,11 +46,10 @@ public Whitelist( List<WhitelistClassBinding> whitelistClassBindings, List<WhitelistInstanceBinding> whitelistInstanceBindings ) { - this.classLoader = Objects.requireNonNull(classLoader); - this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses)); - this.whitelistImportedMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistImportedMethods)); - this.whitelistClassBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistClassBindings)); - this.whitelistInstanceBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistInstanceBindings)); + this.whitelistClasses = List.copyOf(whitelistClasses); + this.whitelistImportedMethods = List.copyOf(whitelistImportedMethods); + this.whitelistClassBindings = List.copyOf(whitelistClassBindings); + this.whitelistInstanceBindings = List.copyOf(whitelistInstanceBindings); } }
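The Whitelist constructor above trades Collections.unmodifiableList(Objects.requireNonNull(...)) for List.copyOf(...). List.copyOf (Java 10+) returns an unmodifiable copy rather than a live view, and it throws NullPointerException for a null collection or null elements, so the explicit null checks become redundant. A minimal standalone illustration of the difference:

    import java.util.ArrayList;
    import java.util.List;

    class CopyOfDemo {
        public static void main(String[] args) {
            List<String> source = new ArrayList<>(List.of("a", "b"));

            // An unmodifiable *copy*: later mutation of 'source' is invisible
            // through it, unlike a Collections.unmodifiableList wrapper.
            List<String> copy = List.copyOf(source);
            source.add("c");

            System.out.println(copy); // prints [a, b]
            // List.copyOf(null), or a list containing null, throws
            // NullPointerException, which is why the Objects.requireNonNull
            // calls could be dropped above.
        }
    }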
diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java index 2130f9343dfa3..1daad59768a15 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -59,23 +58,12 @@ public WhitelistClass( List<WhitelistField> whitelistFields, List<Object> painlessAnnotations ) { - this.origin = Objects.requireNonNull(origin); this.javaClassName = Objects.requireNonNull(javaClassName); - - this.whitelistConstructors = Collections.unmodifiableList(Objects.requireNonNull(whitelistConstructors)); - this.whitelistMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistMethods)); - this.whitelistFields = Collections.unmodifiableList(Objects.requireNonNull(whitelistFields)); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.whitelistConstructors = List.copyOf(whitelistConstructors); + this.whitelistMethods = List.copyOf(whitelistMethods); + this.whitelistFields = List.copyOf(whitelistFields); + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java index c1a3c43196647..872482bcf6281 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistField.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -39,16 +38,7 @@ public WhitelistField(String origin, String fieldName, String canonicalTypeNameP this.origin = Objects.requireNonNull(origin); this.fieldName = Objects.requireNonNull(fieldName); this.canonicalTypeNameParameter = Objects.requireNonNull(canonicalTypeNameParameter); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } }
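WhitelistClass and WhitelistField above both replace the AbstractMap.SimpleEntry detour with a single Collectors.toUnmodifiableMap(Object::getClass, Function.identity()). That collector turns an empty stream into an empty unmodifiable map, which is why the isEmpty() special case disappears, and like the old Collectors.toMap it throws IllegalStateException if two annotations share a class. A standalone sketch of the pattern:

    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    class AnnotationsByTypeDemo {
        public static void main(String[] args) {
            // Stand-ins for painless annotation instances of distinct classes.
            List<Object> annotations = List.<Object>of("some-annotation", 42);

            // Key each instance by its runtime class; keep the instance as the value.
            Map<Class<?>, Object> byType = annotations.stream()
                .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity()));

            System.out.println(byType.get(Integer.class)); // prints 42
        }
    }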
diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java index 8451d1c9f3ef4..8927d290ecc77 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java @@ -8,11 +8,10 @@ package org.elasticsearch.painless.spi; -import java.util.AbstractMap; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -69,22 +68,12 @@ public WhitelistMethod( List<String> canonicalTypeNameParameters, List<Object> painlessAnnotations ) { - this.origin = Objects.requireNonNull(origin); this.augmentedCanonicalClassName = augmentedCanonicalClassName; this.methodName = methodName; this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); - this.canonicalTypeNameParameters = Collections.unmodifiableList(Objects.requireNonNull(canonicalTypeNameParameters)); - - if (painlessAnnotations.isEmpty()) { - this.painlessAnnotations = Collections.emptyMap(); - } else { - this.painlessAnnotations = Collections.unmodifiableMap( - Objects.requireNonNull(painlessAnnotations) - .stream() - .map(painlessAnnotation -> new AbstractMap.SimpleEntry<>(painlessAnnotation.getClass(), painlessAnnotation)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ); - } + this.canonicalTypeNameParameters = List.copyOf(canonicalTypeNameParameters); + this.painlessAnnotations = painlessAnnotations.stream() + .collect(Collectors.toUnmodifiableMap(Object::getClass, Function.identity())); } } diff --git a/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/search/SyntheticSourceIT.java b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/search/SyntheticSourceIT.java new file mode 100644 index 0000000000000..0383999d6f7e5 --- /dev/null +++ b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/search/SyntheticSourceIT.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.painless.search; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.painless.PainlessPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; + +public class SyntheticSourceIT extends ESIntegTestCase { + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singleton(PainlessPlugin.class); + } + + public void testSearchUsingRuntimeField() throws Exception { + createIndex(); + + int numDocs = between(1000, 5000); + for (int i = 0; i < numDocs; i++) { + IndexRequestBuilder indexRequest = client().prepareIndex("test").setSource("id", "" + i); + if (randomInt(100) < 5) { + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + } + indexRequest.get(); + } + client().admin().indices().prepareRefresh("test").get(); + assertNoFailures(client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("long_id").from(0))); + } + + private void createIndex() throws IOException { + XContentBuilder mapping = JsonXContent.contentBuilder(); + mapping.startObject(); + { + mapping.startObject("_source"); + mapping.field("mode", "synthetic"); + mapping.endObject(); + } + { + mapping.startObject("runtime"); + mapping.startObject("long_id"); + mapping.field("type", "long"); + mapping.field("script", "emit(Long.parseLong(params._source.id));"); + mapping.endObject(); + mapping.endObject(); + mapping.startObject("properties"); + mapping.startObject("id").field("type", "keyword").endObject(); + mapping.endObject(); + } + mapping.endObject(); + + assertAcked(client().admin().indices().prepareCreate("test").setMapping(mapping).get()); + } +}
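The new SyntheticSourceIT above indexes documents carrying only a keyword id, declares _source as synthetic, and then runs a range query against a long runtime field whose Painless script reads params._source.id. What the test pins down is that runtime-field scripts can still read _source when it is synthesized from doc values and stored fields instead of being stored verbatim. For reference, the mapping that createIndex() assembles comes out as the following JSON (written out by hand from the builder calls, so treat it as illustrative rather than authoritative):

    String mapping = """
        {
          "_source": { "mode": "synthetic" },
          "runtime": {
            "long_id": {
              "type": "long",
              "script": "emit(Long.parseLong(params._source.id));"
            }
          },
          "properties": {
            "id": { "type": "keyword" }
          }
        }
        """;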
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index e9a3b2c1fd7f7..1f8b7b909909f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -222,10 +222,6 @@ private Location location(ParserRuleContext ctx) { return new Location(sourceName, ctx.getStart().getStartIndex()); } - private Location location(TerminalNode tn) { - return new Location(sourceName, tn.getSymbol().getStartIndex()); - } - @Override public ANode visitSource(SourceContext ctx) { List<SFunction> functions = new ArrayList<>(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java index 3fc572d8446bc..d32639bf3968f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java @@ -45,7 +45,7 @@ public final class PainlessClass { this.staticFields = Map.copyOf(staticFields); this.fields = Map.copyOf(fields); this.functionalInterfaceMethod = functionalInterfaceMethod; - this.annotations = annotations; + this.annotations = Map.copyOf(annotations); this.getterMethodHandles = Map.copyOf(getterMethodHandles); this.setterMethodHandles = Map.copyOf(setterMethodHandles); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index bf001c5e49db9..0c1497b541954 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -1680,6 +1680,7 @@ public PainlessLookup build() { ); } + classesToDirectSubClasses.replaceAll((key, set) -> Set.copyOf(set)); // save some memory, especially when set is empty return new PainlessLookup( javaClassNamesToClasses, canonicalClassNamesToClasses, diff --git a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java index a7fe63eb34ce6..2b9c35429c328 100644 --- a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java +++ b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java @@ -59,7 +59,7 @@ public void testLegacyCircle() throws Exception { ); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { + indexRandom(true, prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { builder.startObject() .field("type", "circle") .startArray("coordinates") diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 3ae6e29802962..afd969cc17ad4 100644 ---
a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -130,17 +130,6 @@ public static class PrefixTrees { public static final String GEOHASH = "geohash"; } - @Deprecated - public static class DeprecatedParameters { - - private static void checkPrefixTreeSupport(String fieldName) { - if (ShapesAvailability.JTS_AVAILABLE == false || ShapesAvailability.SPATIAL4J_AVAILABLE == false) { - throw new ElasticsearchParseException("Field parameter [{}] is not supported for [{}] field type", fieldName, CONTENT_TYPE); - } - - } - } - private static Builder builder(FieldMapper in) { return ((LegacyGeoShapeFieldMapper) in).builder; } diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java index 6ef1f4c8a99b6..6ad4d2c06c6d4 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java @@ -86,16 +86,14 @@ public void testPointsOnlyExplicit() throws Exception { // MULTIPOINT MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - client().prepareIndex("geo_points_only") - .setId("1") + prepareIndex("geo_points_only").setId("1") .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); // POINT Point point = GeometryTestUtils.randomPoint(false); - client().prepareIndex("geo_points_only") - .setId("2") + prepareIndex("geo_points_only").setId("2") .setSource(GeoJson.toXContent(point, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -125,8 +123,7 @@ public void testPointsOnly() throws Exception { Geometry geometry = GeometryTestUtils.randomGeometry(false); try { - client().prepareIndex("geo_points_only") - .setId("1") + prepareIndex("geo_points_only").setId("1") .setSource(GeoJson.toXContent(geometry, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -161,8 +158,7 @@ public void testFieldAlias() throws IOException { ensureGreen(); MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(false); - client().prepareIndex(defaultIndexName) - .setId("1") + prepareIndex(defaultIndexName).setId("1") .setSource(GeoJson.toXContent(multiPoint, jsonBuilder().startObject().field(defaultFieldName), null).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java index 3a7f9a1ca6eb5..c6544bac2b13c 100644 --- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java @@ -106,14 +106,11 @@ private void init() throws IOException { BulkResponse bulk = client().prepareBulk() .add( - client().prepareIndex(INDEX_NAME) - .setId("all") + prepareIndex(INDEX_NAME).setId("all") .setSource(Map.of("all_rank_features", 
Map.of(LOWER_RANKED_FEATURE, 10, HIGHER_RANKED_FEATURE, 20))) ) - .add(client().prepareIndex(INDEX_NAME).setId("lower").setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10)))) - .add( - client().prepareIndex(INDEX_NAME).setId("higher").setSource(Map.of("all_rank_features", Map.of(HIGHER_RANKED_FEATURE, 20))) - ) + .add(prepareIndex(INDEX_NAME).setId("lower").setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10)))) + .add(prepareIndex(INDEX_NAME).setId("higher").setSource(Map.of("all_rank_features", Map.of(HIGHER_RANKED_FEATURE, 20)))) .get(); assertFalse(bulk.buildFailureMessage(), bulk.hasFailures()); assertThat(refresh().getFailedShards(), equalTo(0)); diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java index ed5d89ad1df8c..08a3d046b00f7 100644 --- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java @@ -32,6 +32,7 @@ import java.util.Collection; import java.util.List; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -72,12 +73,12 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testSearchReturnsTokenCount() throws IOException { init(); - assertSearchReturns(searchById("single"), "single"); - assertSearchReturns(searchById("bulk1"), "bulk1"); - assertSearchReturns(searchById("bulk2"), "bulk2"); - assertSearchReturns(searchById("multi"), "multi"); - assertSearchReturns(searchById("multibulk1"), "multibulk1"); - assertSearchReturns(searchById("multibulk2"), "multibulk2"); + assertResponse(searchById("single"), resp -> assertSearchReturns(resp, "single")); + assertResponse(searchById("bulk1"), resp -> assertSearchReturns(resp, "bulk1")); + assertResponse(searchById("bulk2"), resp -> assertSearchReturns(resp, "bulk2")); + assertResponse(searchById("multi"), resp -> assertSearchReturns(resp, "multi")); + assertResponse(searchById("multibulk1"), resp -> assertSearchReturns(resp, "multibulk1")); + assertResponse(searchById("multibulk2"), resp -> assertSearchReturns(resp, "multibulk2")); } /** @@ -86,11 +87,14 @@ public void testSearchByTokenCount() throws IOException { init(); - assertSearchReturns(searchByNumericRange(4, 4).get(), "single"); - assertSearchReturns(searchByNumericRange(10, 10).get(), "multibulk2"); - assertSearchReturns(searchByNumericRange(7, 10).get(), "multi", "multibulk1", "multibulk2"); - assertSearchReturns(searchByNumericRange(1, 10).get(), "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2"); - assertSearchReturns(searchByNumericRange(12, 12).get()); + assertResponse(searchByNumericRange(4, 4), response -> assertSearchReturns(response, "single")); + assertResponse(searchByNumericRange(10, 10), response -> assertSearchReturns(response, "multibulk2")); + assertResponse(searchByNumericRange(7, 10), response -> assertSearchReturns(response, "multi", "multibulk1", "multibulk2")); + assertResponse( + searchByNumericRange(1, 10), + response ->
assertSearchReturns(response, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2") + ); + assertResponse(searchByNumericRange(12, 12), this::assertSearchReturns); } /** @@ -100,11 +104,12 @@ public void testFacetByTokenCount() throws IOException { init(); String facetField = randomFrom(Arrays.asList("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")); - SearchResponse result = searchByNumericRange(1, 10).addAggregation(AggregationBuilders.terms("facet").field(facetField)).get(); - assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2"); - assertThat(result.getAggregations().asList().size(), equalTo(1)); - Terms terms = (Terms) result.getAggregations().asList().get(0); - assertThat(terms.getBuckets().size(), equalTo(9)); + assertResponse(searchByNumericRange(1, 10).addAggregation(AggregationBuilders.terms("facet").field(facetField)), result -> { + assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2"); + assertThat(result.getAggregations().asList().size(), equalTo(1)); + Terms terms = (Terms) result.getAggregations().asList().get(0); + assertThat(terms.getBuckets().size(), equalTo(9)); + }); } private void init() throws IOException { @@ -171,11 +176,11 @@ private void init() throws IOException { } private IndexRequestBuilder prepareIndex(String id, String... texts) throws IOException { - return client().prepareIndex("test").setId(id).setSource("foo", texts); + return prepareIndex("test").setId(id).setSource("foo", texts); } - private SearchResponse searchById(String id) { - return prepareTokenCountFieldMapperSearch().setQuery(QueryBuilders.termQuery("_id", id)).get(); + private SearchRequestBuilder searchById(String id) { + return prepareTokenCountFieldMapperSearch().setQuery(QueryBuilders.termQuery("_id", id)); } private SearchRequestBuilder searchByNumericRange(int low, int high) {
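The TokenCountFieldMapperIntegrationIT hunks above convert "SearchResponse result = ...get()" call sites into assertResponse(requestBuilder, response -> ...), and searchById now returns the SearchRequestBuilder instead of an already-executed SearchResponse. Judging from the newly imported ElasticsearchAssertions.assertResponse, the helper executes the request, hands the response to the consumer, and takes over releasing it afterwards; the diff does not show its implementation, so the following is a guess at the shape of the pattern rather than the real code:

    import java.util.function.Consumer;
    import java.util.function.Supplier;

    class AssertResponseSketch {
        // Toy stand-in for Elasticsearch's ref-counted responses; the real
        // helper presumably works with RefCounted/decRef instead.
        interface Releasable {
            void release();
        }

        // Run the request, apply the assertions, and always release the
        // response, even when an assertion throws.
        static <R extends Releasable> void assertResponse(Supplier<R> execute, Consumer<R> assertions) {
            R response = execute.get();
            try {
                assertions.accept(response);
            } finally {
                response.release();
            }
        }
    }

Under that reading, a test can no longer leak a response from these call sites, which would explain why the builder rather than the response is now what the helper methods return.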
diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index ee04346591009..161cb1674a7b9 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -324,9 +324,9 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (textFieldType.isSyntheticSource()) { - return BlockStoredFieldsReader.bytesRefsFromStrings(storedFieldNameForSyntheticSource()); + return new BlockStoredFieldsReader.BytesFromStringsBlockLoader(storedFieldNameForSyntheticSource()); } - return BlockSourceReader.bytesRefs(SourceValueFetcher.toString(blContext.sourcePaths(name()))); + return new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(blContext.sourcePaths(name()))); } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index abed23621d5e9..b35fb09c2d053 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -310,13 +310,13 @@ public Query rangeQuery( public BlockLoader blockLoader(BlockLoaderContext blContext) { if (indexMode == IndexMode.TIME_SERIES && metricType == TimeSeriesParams.MetricType.COUNTER) { // Counters are not supported by ESQL so we load them in null - return BlockDocValuesReader.nulls(); + return BlockLoader.CONSTANT_NULLS; } if (hasDocValues()) { double scalingFactorInverse = 1d / scalingFactor; - return BlockDocValuesReader.doubles(name(), l -> l * scalingFactorInverse); + return new BlockDocValuesReader.DoublesBlockLoader(name(), l -> l * scalingFactorInverse); } - return BlockSourceReader.doubles(sourceValueFetcher(blContext.sourcePaths(name()))); + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); } @Override diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/BWCTemplateTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/BWCTemplateTests.java index 9ddbc72e8ff94..b4ee066a0e391 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/BWCTemplateTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/BWCTemplateTests.java @@ -34,8 +34,8 @@ public void testBeatsTemplatesBWC() throws Exception { indicesAdmin().preparePutTemplate("packetbeat").setSource(packetBeat, XContentType.JSON).get(); indicesAdmin().preparePutTemplate("filebeat").setSource(fileBeat, XContentType.JSON).get(); - client().prepareIndex("metricbeat-foo").setId("1").setSource("message", "foo").get(); - client().prepareIndex("packetbeat-foo").setId("1").setSource("message", "foo").get(); - client().prepareIndex("filebeat-foo").setId("1").setSource("message", "foo").get(); + prepareIndex("metricbeat-foo").setId("1").setSource("message", "foo").get(); + prepareIndex("packetbeat-foo").setId("1").setSource("message", "foo").get(); + prepareIndex("filebeat-foo").setId("1").setSource("message", "foo").get(); } } diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index 34ead2c21480b..e433ce0b60596 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -511,7 +511,7 @@ public void testHasChildAndHasParentFailWhenSomeSegmentsDontContainAnyParentOrCh createIndexRequest("test", "parent", "1", null, "p_field", 1).get(); createIndexRequest("test", "child", "2", "1", "c_field", 1).get(); - client().prepareIndex("test").setId("3").setSource("p_field", 1).get(); + prepareIndex("test").setId("3").setSource("p_field", 1).get(); refresh(); assertHitCountAndNoFailures( @@ -736,8 +736,7 @@ public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Excep 0L ); - client().prepareIndex("test") - .setSource(jsonBuilder().startObject().field("text", "value").endObject()) + prepareIndex("test").setSource(jsonBuilder().startObject().field("text", "value").endObject()) .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); @@ -761,7 +760,7 @@ public void testHasChildAndHasParentFilter_withFilter() throws Exception { createIndexRequest("test", "child", "2", "1", "c_field", 1).get(); indicesAdmin().prepareFlush("test").get(); - client().prepareIndex("test").setId("3").setSource("p_field", 2).get(); +
prepareIndex("test").setId("3").setSource("p_field", 2).get(); refresh(); assertNoFailuresAndResponse( @@ -806,8 +805,10 @@ public void testHasChildInnerHitsHighlighting() throws Exception { assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); SearchHit[] searchHits = response.getHits().getHits()[0].getInnerHits().get("child").getHits(); assertThat(searchHits.length, equalTo(1)); - assertThat(searchHits[0].getHighlightFields().get("c_field").getFragments().length, equalTo(1)); - assertThat(searchHits[0].getHighlightFields().get("c_field").getFragments()[0].string(), equalTo("foo bar")); + HighlightField highlightField1 = searchHits[0].getHighlightFields().get("c_field"); + assertThat(highlightField1.fragments().length, equalTo(1)); + HighlightField highlightField = searchHits[0].getHighlightFields().get("c_field"); + assertThat(highlightField.fragments()[0].string(), equalTo("foo bar")); } ); } @@ -1301,7 +1302,7 @@ public void testParentChildQueriesNoParentType() throws Exception { ensureGreen(); String parentId = "p1"; - client().prepareIndex("test").setId(parentId).setSource("p_field", "1").get(); + prepareIndex("test").setId(parentId).setSource("p_field", "1").get(); refresh(); try { @@ -1407,8 +1408,7 @@ public void testParentChildQueriesViaScrollApi() throws Exception { .setSize(1) .addStoredField("_id") .setQuery(query) - .execute() - .actionGet(); + .get(); assertNoFailures(scrollResponse); assertThat(scrollResponse.getHits().getTotalHits().value, equalTo(10L)); @@ -1786,7 +1786,7 @@ public void testHighlightersIgnoreParentChild() throws IOException { assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("parent-id")); HighlightField highlightField = response.getHits().getAt(0).getHighlightFields().get("searchText"); - assertThat(highlightField.getFragments()[0].string(), equalTo("quick brown fox")); + assertThat(highlightField.fragments()[0].string(), equalTo("quick brown fox")); } ); @@ -1799,7 +1799,7 @@ public void testHighlightersIgnoreParentChild() throws IOException { assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("child-id")); HighlightField highlightField = response.getHits().getAt(0).getHighlightFields().get("searchText"); - assertThat(highlightField.getFragments()[0].string(), equalTo("quick brown fox")); + assertThat(highlightField.fragments()[0].string(), equalTo("quick brown fox")); } ); } diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java index 39a84f2d16d7f..f851678b6c9d6 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/InnerHitsIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -183,10 +184,8 @@ public void testSimpleParentChild() throws Exception { response -> { SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); assertThat(innerHits.getHits().length, equalTo(1)); - assertThat( - 
innerHits.getAt(0).getHighlightFields().get("message").getFragments()[0].string(), - equalTo("fox eat quick") - ); + HighlightField highlightField = innerHits.getAt(0).getHighlightFields().get("message"); + assertThat(highlightField.fragments()[0].string(), equalTo("fox eat quick")); assertThat(innerHits.getAt(0).getExplanation().toString(), containsString("weight(message:fox")); assertThat(innerHits.getAt(0).getFields().get("message").getValue().toString(), equalTo("fox eat quick")); assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5")); @@ -627,7 +626,7 @@ public void testInnerHitsWithIgnoreUnmapped() { assertAcked(prepareCreate("index2")); createIndexRequest("index1", "parent_type", "1", null, "nested_type", Collections.singletonMap("key", "value")).get(); createIndexRequest("index1", "child_type", "2", "1").get(); - client().prepareIndex("index2").setId("3").setSource("key", "value").get(); + prepareIndex("index2").setId("3").setSource("key", "value").get(); refresh(); assertSearchHitsWithoutFailures( prepareSearch("index1", "index2").setQuery( diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java index a67ebd4cbca22..02f24a67dda02 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -100,7 +100,7 @@ private IndexRequestBuilder createIndexRequest(String index, String type, String String name = type; type = "doc"; - IndexRequestBuilder indexRequestBuilder = client().prepareIndex(index).setId(id); + IndexRequestBuilder indexRequestBuilder = prepareIndex(index).setId(id); Map<String, Object> joinField = new HashMap<>(); if (parentId != null) { joinField.put("name", name); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java index b130411e5e099..c7999f27834a9 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; +import java.util.function.ToLongFunction; public class ParentAggregationBuilder extends ValuesSourceAggregationBuilder<ParentAggregationBuilder> { @@ -90,7 +91,7 @@ public BucketCardinality bucketCardinality() { } @Override - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) { return false; }
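The ParentAggregationBuilder hunk above tracks a signature change: supportsParallelCollection now receives a ToLongFunction<String> that resolves a field name to its cardinality. The parent/child join aggregation still opts out unconditionally, but the new parameter lets other builders make the decision data-dependent. A hedged sketch of how a cardinality-sensitive builder might use it (the threshold and field accessor are illustrative, not taken from this diff):

    import java.util.function.ToLongFunction;

    class CardinalityGatedAggregation {
        private final String field;
        private final long maxCardinalityForParallelism;

        CardinalityGatedAggregation(String field, long maxCardinalityForParallelism) {
            this.field = field;
            this.maxCardinalityForParallelism = maxCardinalityForParallelism;
        }

        // Allow parallel collection only while the target field stays
        // low-cardinality; treat a negative (unknown) answer as "don't".
        boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) {
            long cardinality = fieldCardinalityResolver.applyAsLong(field);
            return cardinality >= 0 && cardinality <= maxCardinalityForParallelism;
        }
    }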
diff --git a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index cad976411b8da..88a39fe4aebc8 100644 --- a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -10,8 +10,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.MultiSearchResponse.Item; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -58,6 +57,7 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xcontent.XContentFactory.yamlBuilder; @@ -80,16 +80,13 @@ public void testPercolatorQuery() throws Exception { .setMapping("id", "type=keyword", "field1", "type=keyword", "field2", "type=keyword", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("query", matchAllQuery()).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("id", "2").field("query", matchQuery("field1", "value")).endObject()) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("id", "3") @@ -101,52 +98,66 @@ BytesReference source = BytesReference.bytes(jsonBuilder().startObject().endObject()); logger.info("percolating empty doc"); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()); logger.info("percolating doc with 1 field"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + } + ); source = BytesReference.bytes(jsonBuilder().startObject().field("field1",
"value").field("field2", "value").endObject()); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); - + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + } + ); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()) - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(1))); + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()) + ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat( + response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat( + response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), 
equalTo(Arrays.asList(1))); + } + ); } public void testPercolatorRangeQueries() throws Exception { @@ -166,16 +177,13 @@ public void testPercolatorRangeQueries() throws Exception { ) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field1").from(10).to(12)).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field1").from(20).to(22)).endObject()) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("query", boolQuery().must(rangeQuery("field1").from(10).to(12)).must(rangeQuery("field1").from(12).to(14))) @@ -183,16 +191,13 @@ public void testPercolatorRangeQueries() throws Exception { ) .get(); indicesAdmin().prepareRefresh().get(); - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field2").from(10).to(12)).endObject()) .get(); - client().prepareIndex("test") - .setId("5") + prepareIndex("test").setId("5") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field2").from(20).to(22)).endObject()) .get(); - client().prepareIndex("test") - .setId("6") + prepareIndex("test").setId("6") .setSource( jsonBuilder().startObject() .field("query", boolQuery().must(rangeQuery("field2").from(10).to(12)).must(rangeQuery("field2").from(12).to(14))) @@ -200,16 +205,13 @@ public void testPercolatorRangeQueries() throws Exception { ) .get(); indicesAdmin().prepareRefresh().get(); - client().prepareIndex("test") - .setId("7") + prepareIndex("test").setId("7") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field3").from("192.168.1.0").to("192.168.1.5")).endObject()) .get(); - client().prepareIndex("test") - .setId("8") + prepareIndex("test").setId("8") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field3").from("192.168.1.20").to("192.168.1.30")).endObject()) .get(); - client().prepareIndex("test") - .setId("9") + prepareIndex("test").setId("9") .setSource( jsonBuilder().startObject() .field( @@ -220,8 +222,7 @@ public void testPercolatorRangeQueries() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId("10") + prepareIndex("test").setId("10") .setSource( jsonBuilder().startObject() .field( @@ -236,46 +237,52 @@ public void testPercolatorRangeQueries() throws Exception { // Test long range: BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", 12).endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - logger.info("response={}", response); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + logger.info("response={}", response); + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", 11).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - 
assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); // Test double range: source = BytesReference.bytes(jsonBuilder().startObject().field("field2", 12).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("6")); - assertThat(response.getHits().getAt(1).getId(), equalTo("4")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("6")); + assertThat(response.getHits().getAt(1).getId(), equalTo("4")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field2", 11).endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("4")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("4")); + }); // Test IP range: source = BytesReference.bytes(jsonBuilder().startObject().field("field3", "192.168.1.5").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("9")); - assertThat(response.getHits().getAt(1).getId(), equalTo("7")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("9")); + assertThat(response.getHits().getAt(1).getId(), equalTo("7")); + }); source = BytesReference.bytes(jsonBuilder().startObject().field("field3", "192.168.1.4").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("7")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("7")); + }); // Test date range: source = BytesReference.bytes(jsonBuilder().startObject().field("field4", "2016-05-15").endObject()); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("10")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("10")); + }); } public void testPercolatorGeoQueries() throws Exception { @@ -283,8 +290,7 @@ public void testPercolatorGeoQueries() throws Exception { indicesAdmin().prepareCreate("test").setMapping("id", "type=keyword", "field1", "type=geo_point", "query", "type=percolator") ); - 
client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("query", geoDistanceQuery("field1").point(52.18, 4.38).distance(50, DistanceUnit.KILOMETERS)) @@ -293,8 +299,7 @@ public void testPercolatorGeoQueries() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("query", geoBoundingBoxQuery("field1").setCorners(52.3, 4.4, 52.1, 4.6)) @@ -303,8 +308,7 @@ public void testPercolatorGeoQueries() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field( @@ -323,13 +327,15 @@ public void testPercolatorGeoQueries() throws Exception { BytesReference source = BytesReference.bytes( jsonBuilder().startObject().startObject("field1").field("lat", 52.20).field("lon", 4.51).endObject().endObject() ); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testPercolatorQueryExistingDocument() throws Exception { @@ -338,16 +344,13 @@ public void testPercolatorQueryExistingDocument() throws Exception { .setMapping("id", "type=keyword", "field1", "type=keyword", "field2", "type=keyword", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("query", matchAllQuery()).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("id", "2").field("query", matchQuery("field1", "value")).endObject()) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("id", "3") @@ -356,32 +359,37 @@ public void testPercolatorQueryExistingDocument() throws Exception { ) .get(); - client().prepareIndex("test").setId("4").setSource("{\"id\": \"4\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("5").setSource(XContentType.JSON, "id", "5", "field1", "value").get(); - client().prepareIndex("test").setId("6").setSource(XContentType.JSON, "id", "6", "field1", "value", "field2", "value").get(); + prepareIndex("test").setId("4").setSource("{\"id\": \"4\"}", XContentType.JSON).get(); + prepareIndex("test").setId("5").setSource(XContentType.JSON, "id", "5", "field1", "value").get(); + prepareIndex("test").setId("6").setSource(XContentType.JSON, "id", "6", "field1", "value", "field2", "value").get(); indicesAdmin().prepareRefresh().get(); logger.info("percolating empty doc"); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "1", null, null, null)).get(); - assertHitCount(response, 1); - 
assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "1", null, null, null)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + }); logger.info("percolating doc with 1 field"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } + ); logger.info("percolating doc with 2 fields"); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testPercolatorQueryExistingDocumentSourceDisabled() throws Exception { @@ -390,9 +398,9 @@ public void testPercolatorQueryExistingDocumentSourceDisabled() throws Exception .setMapping("_source", "enabled=false", "field1", "type=keyword", "query", "type=percolator") ); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()).get(); - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); indicesAdmin().prepareRefresh().get(); logger.info("percolating empty doc with source disabled"); @@ -408,8 +416,7 @@ public void testPercolatorSpecificQueries() throws Exception { .setMapping("id", "type=keyword", "field1", "type=text", "field2", "type=text", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("id", "1") @@ -417,8 +424,7 @@ public void testPercolatorSpecificQueries() throws Exception { .endObject() ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("id", "2") @@ -433,8 +439,7 @@ public void testPercolatorSpecificQueries() throws Exception { .get(); indicesAdmin().prepareRefresh().get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("id", "3") @@ -454,8 +459,7 @@ public void testPercolatorSpecificQueries() throws Exception { .get(); // doesn't match - 
client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource( jsonBuilder().startObject() .field("id", "4") @@ -481,16 +485,18 @@ public void testPercolatorSpecificQueries() throws Exception { .field("field2", "the quick brown fox falls down into the well") .endObject() ); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 3); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getScore(), equalTo(Float.NaN)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getScore(), equalTo(Float.NaN)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getScore(), equalTo(Float.NaN)); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getScore(), equalTo(Float.NaN)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getScore(), equalTo(Float.NaN)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getScore(), equalTo(Float.NaN)); + } + ); } public void testPercolatorQueryWithHighlighting() throws Exception { @@ -504,228 +510,245 @@ public void testPercolatorQueryWithHighlighting() throws Exception { indicesAdmin().prepareCreate("test") .setMapping("id", "type=keyword", "field1", fieldMapping.toString(), "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("query", matchQuery("field1", "brown fox")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("2") + .get(); + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("id", "2").field("query", matchQuery("field1", "lazy dog")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("3") + .get(); + prepareIndex("test").setId("3") .setSource(jsonBuilder().startObject().field("id", "3").field("query", termQuery("field1", "jumps")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("4") + .get(); + prepareIndex("test").setId("4") .setSource(jsonBuilder().startObject().field("id", "4").field("query", termQuery("field1", "dog")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("5") + .get(); + prepareIndex("test").setId("5") .setSource(jsonBuilder().startObject().field("id", "5").field("query", termQuery("field1", "fox")).endObject()) - .execute() - .actionGet(); + .get(); indicesAdmin().prepareRefresh().get(); BytesReference document = BytesReference.bytes( jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject() ); - SearchResponse searchResponse = prepareSearch().setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) - .highlighter(new HighlightBuilder().field("field1")) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(searchResponse, 5); - - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("field1").fragments()[0].string(), - 
equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(), - equalTo("The quick brown fox jumps over the lazy dog") + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) + .highlighter(new HighlightBuilder().field("field1")) + .addSort("id", SortOrder.ASC), + searchResponse -> { + assertHitCount(searchResponse, 5); + + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog") + ); + } ); BytesReference document1 = BytesReference.bytes( jsonBuilder().startObject().field("field1", "The quick brown fox jumps").endObject() ); BytesReference document2 = BytesReference.bytes(jsonBuilder().startObject().field("field1", "over the lazy dog").endObject()); - searchResponse = prepareSearch().setQuery( - boolQuery().should(new PercolateQueryBuilder("query", document1, XContentType.JSON).setName("query1")) - .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - logger.info("searchResponse={}", searchResponse); - assertHitCount(searchResponse, 5); - - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("query2_field1").fragments()[0].string(), - equalTo("over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("query2_field1").fragments()[0].string(), - equalTo("over the lazy dog") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query1_field1").fragments()[0].string(), - equalTo("The quick brown fox jumps") - ); - - searchResponse = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), - 
BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) - ), - XContentType.JSON - ) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - assertHitCount(searchResponse, 5); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), - equalTo(Arrays.asList(1, 3)) - ); - assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("1_field1").fragments()[0].string(), equalTo("fox")); - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("3_field1").fragments()[0].string(), - equalTo("brown fox") - ); - assertThat( - searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat(searchResponse.getHits().getAt(1).getHighlightFields().get("0_field1").fragments()[0].string(), equalTo("dog")); - assertThat( - searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(2)) - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("2_field1").fragments()[0].string(), - equalTo("jumps") - ); - assertThat( - searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat(searchResponse.getHits().getAt(3).getHighlightFields().get("0_field1").fragments()[0].string(), equalTo("dog")); - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot").getValues(), - equalTo(Arrays.asList(1, 3)) - ); - assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("1_field1").fragments()[0].string(), equalTo("fox")); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("3_field1").fragments()[0].string(), - equalTo("brown fox") + assertResponse( + prepareSearch().setQuery( + boolQuery().should(new PercolateQueryBuilder("query", document1, XContentType.JSON).setName("query1")) + .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + logger.info("searchResponse={}", searchResponse); + assertHitCount(searchResponse, 5); + + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("query2_field1").fragments()[0].string(), + equalTo("over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("query2_field1").fragments()[0].string(), + equalTo("over the lazy dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query1_field1").fragments()[0].string(), + equalTo("The quick brown fox jumps") + ); + } ); - searchResponse = prepareSearch().setQuery( - boolQuery().should( + assertResponse( + prepareSearch().setQuery( new PercolateQueryBuilder( "query", Arrays.asList( 
BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()) + BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) ), XContentType.JSON - ).setName("query1") - ) - .should( + ) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + assertHitCount(searchResponse, 5); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(1, 3)) + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("3_field1").fragments()[0].string(), + equalTo("brown fox") + ); + assertThat( + searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("0_field1").fragments()[0].string(), + equalTo("dog") + ); + assertThat( + searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(2)) + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("2_field1").fragments()[0].string(), + equalTo("jumps") + ); + assertThat( + searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(3).getHighlightFields().get("0_field1").fragments()[0].string(), + equalTo("dog") + ); + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(1, 3)) + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("3_field1").fragments()[0].string(), + equalTo("brown fox") + ); + } + ); + + assertResponse( + prepareSearch().setQuery( + boolQuery().should( new PercolateQueryBuilder( "query", Arrays.asList( - BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), - BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) + BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()) ), XContentType.JSON - ).setName("query2") + ).setName("query1") ) - ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC).get(); - logger.info("searchResponse={}", searchResponse); - assertHitCount(searchResponse, 5); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query1_1_field1").fragments()[0].string(), - equalTo("fox") - ); - 
assertThat( - searchResponse.getHits().getAt(0).getHighlightFields().get("query2_1_field1").fragments()[0].string(), - equalTo("brown fox") - ); - - assertThat( - searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(1).getHighlightFields().get("query1_0_field1").fragments()[0].string(), - equalTo("dog") - ); - - assertThat( - searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(2).getHighlightFields().get("query2_0_field1").fragments()[0].string(), - equalTo("jumps") - ); - - assertThat( - searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(0)) - ); - assertThat( - searchResponse.getHits().getAt(3).getHighlightFields().get("query1_0_field1").fragments()[0].string(), - equalTo("dog") - ); - - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query1").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query2").getValues(), - equalTo(Collections.singletonList(1)) - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query1_1_field1").fragments()[0].string(), - equalTo("fox") - ); - assertThat( - searchResponse.getHits().getAt(4).getHighlightFields().get("query2_1_field1").fragments()[0].string(), - equalTo("brown fox") + .should( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), + BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) + ), + XContentType.JSON + ).setName("query2") + ) + ).highlighter(new HighlightBuilder().field("field1")).addSort("id", SortOrder.ASC), + searchResponse -> { + logger.info("searchResponse={}", searchResponse); + assertHitCount(searchResponse, 5); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query1_1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(0).getHighlightFields().get("query2_1_field1").fragments()[0].string(), + equalTo("brown fox") + ); + + assertThat( + searchResponse.getHits().getAt(1).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(1).getHighlightFields().get("query1_0_field1").fragments()[0].string(), + equalTo("dog") + ); + + assertThat( + searchResponse.getHits().getAt(2).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + searchResponse.getHits().getAt(2).getHighlightFields().get("query2_0_field1").fragments()[0].string(), + equalTo("jumps") + ); + + assertThat( + searchResponse.getHits().getAt(3).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(0)) + ); + assertThat( + 
searchResponse.getHits().getAt(3).getHighlightFields().get("query1_0_field1").fragments()[0].string(), + equalTo("dog") + ); + + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query1").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(4).getFields().get("_percolator_document_slot_query2").getValues(), + equalTo(Collections.singletonList(1)) + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query1_1_field1").fragments()[0].string(), + equalTo("fox") + ); + assertThat( + searchResponse.getHits().getAt(4).getHighlightFields().get("query2_1_field1").fragments()[0].string(), + equalTo("brown fox") + ); + } ); } @@ -733,21 +756,23 @@ public void testTakePositionOffsetGapIntoAccount() throws Exception { assertAcked( indicesAdmin().prepareCreate("test").setMapping("field", "type=text,position_increment_gap=5", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", new MatchPhraseQueryBuilder("field", "brown fox").slop(4)).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("query", new MatchPhraseQueryBuilder("field", "brown fox").slop(5)).endObject()) .get(); indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch().setQuery( - new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) - ).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + } + ); } public void testManyPercolatorFields() throws Exception { @@ -808,12 +833,10 @@ public void testWithMultiplePercolatorFields() throws Exception { ); // Acceptable: - client().prepareIndex("test1") - .setId("1") + prepareIndex("test1").setId("1") .setSource(jsonBuilder().startObject().field(queryFieldName, matchQuery("field", "value")).endObject()) .get(); - client().prepareIndex("test2") - .setId("1") + prepareIndex("test2").setId("1") .setSource( jsonBuilder().startObject() .startObject("object_field") @@ -825,24 +848,28 @@ public void testWithMultiplePercolatorFields() throws Exception { indicesAdmin().prepareRefresh().get(); BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field", "value").endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder(queryFieldName, source, XContentType.JSON)) - .setIndices("test1") - .get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder(queryFieldName, source, XContentType.JSON)).setIndices("test1"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); + } + ); - response = prepareSearch().setQuery(new PercolateQueryBuilder("object_field." 
+ queryFieldName, source, XContentType.JSON)) - .setIndices("test2") - .get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getIndex(), equalTo("test2")); + assertResponse( + prepareSearch().setQuery(new PercolateQueryBuilder("object_field." + queryFieldName, source, XContentType.JSON)) + .setIndices("test2"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getIndex(), equalTo("test2")); + } + ); // Unacceptable: DocumentParsingException e = expectThrows(DocumentParsingException.class, () -> { - client().prepareIndex("test2") - .setId("1") + prepareIndex("test2").setId("1") .setSource( jsonBuilder().startObject() .startArray("object_field") @@ -885,8 +912,7 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .endObject() .endObject(); assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping)); - client().prepareIndex("test") - .setId("q1") + prepareIndex("test").setId("q1") .setSource( jsonBuilder().startObject() .field("id", "q1") @@ -902,8 +928,7 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { ) .get(); // this query should never match as it doesn't use nested query: - client().prepareIndex("test") - .setId("q2") + prepareIndex("test").setId("q2") .setSource( jsonBuilder().startObject() .field("id", "q2") @@ -913,73 +938,15 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .get(); indicesAdmin().prepareRefresh().get(); - client().prepareIndex("test") - .setId("q3") + prepareIndex("test").setId("q3") .setSource(jsonBuilder().startObject().field("id", "q3").field("query", QueryBuilders.matchAllQuery()).endObject()) .get(); indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "stark") - .startArray("employee") - .startObject() - .field("name", "virginia potts") - .endObject() - .startObject() - .field("name", "tony stark") - .endObject() - .endArray() - .endObject() - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "notstark") - .startArray("employee") - .startObject() - .field("name", "virginia stark") - .endObject() - .startObject() - .field("name", "tony stark") - .endObject() - .endArray() - .endObject() - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("companyname", "notstark").endObject()), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); - - response = prepareSearch().setQuery( - new PercolateQueryBuilder( - "query", - Arrays.asList( + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( 
+ "query", BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() @@ -994,149 +961,214 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .endArray() .endObject() ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() - .field("companyname", "stark") + .field("companyname", "notstark") .startArray("employee") .startObject() - .field("name", "peter parker") + .field("name", "virginia stark") .endObject() .startObject() - .field("name", "virginia potts") + .field("name", "tony stark") .endObject() .endArray() .endObject() ), - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("companyname", "stark") - .startArray("employee") - .startObject() - .field("name", "peter parker") - .endObject() - .endArray() - .endObject() - ) - ), - XContentType.JSON - ) - ).addSort("id", SortOrder.ASC).get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); - assertThat(response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1, 2))); + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("companyname", "notstark").endObject()), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("q3")); + } + ); + + assertResponse( + prepareSearch().setQuery( + new PercolateQueryBuilder( + "query", + Arrays.asList( + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "virginia potts") + .endObject() + .startObject() + .field("name", "tony stark") + .endObject() + .endArray() + .endObject() + ), + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "peter parker") + .endObject() + .startObject() + .field("name", "virginia potts") + .endObject() + .endArray() + .endObject() + ), + BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .field("companyname", "stark") + .startArray("employee") + .startObject() + .field("name", "peter parker") + .endObject() + .endArray() + .endObject() + ) + ), + XContentType.JSON + ) + ).addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); + assertThat( + response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1)) + ); + assertThat(response.getHits().getAt(1).getId(), equalTo("q3")); + assertThat( + 
response.getHits().getAt(1).getFields().get("_percolator_document_slot").getValues(), + equalTo(Arrays.asList(0, 1, 2)) + ); + } + ); } public void testPercolatorQueryViaMultiSearch() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=text", "query", "type=percolator")); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("3") + .get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()).get(); + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("query", boolQuery().must(matchQuery("field1", "b")).must(matchQuery("field1", "c"))) .endObject() ) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("4") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("5") - .setSource(jsonBuilder().startObject().field("field1", "c").endObject()) - .execute() - .actionGet(); + .get(); + prepareIndex("test").setId("4").setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()).get(); + prepareIndex("test").setId("5").setSource(jsonBuilder().startObject().field("field1", "c").endObject()).get(); indicesAdmin().prepareRefresh().get(); - MultiSearchResponse response = client().prepareMultiSearch() - .add( - prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(jsonBuilder().startObject().field("field1", "b").endObject()), - XContentType.JSON + assertResponse( + client().prepareMultiSearch() + .add( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(jsonBuilder().startObject().field("field1", "b").endObject()), + XContentType.JSON + ) ) ) - ) - .add( - prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(yamlBuilder().startObject().field("field1", "c").endObject()), - XContentType.YAML + .add( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(yamlBuilder().startObject().field("field1", "c").endObject()), + XContentType.YAML + ) ) ) - ) - .add( - prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(jsonBuilder().startObject().field("field1", "b c").endObject()), - XContentType.JSON + .add( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(jsonBuilder().startObject().field("field1", "b c").endObject()), + XContentType.JSON + ) ) ) - ) - .add( - prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "query", - BytesReference.bytes(jsonBuilder().startObject().field("field1", "d").endObject()), - XContentType.JSON + .add( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "query", + BytesReference.bytes(jsonBuilder().startObject().field("field1", "d").endObject()), + XContentType.JSON + ) ) ) - ) - .add(prepareSearch("test").setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null))) - .add( - prepareSearch("test") // non existing doc, so error element - 
.setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)) - ) - .get(); - - MultiSearchResponse.Item item = response.getResponses()[0]; - assertHitCount(item.getResponse(), 2L); - assertSearchHits(item.getResponse(), "1", "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[1]; - assertHitCount(item.getResponse(), 2L); - assertSearchHits(item.getResponse(), "2", "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[2]; - assertHitCount(item.getResponse(), 4L); - assertSearchHits(item.getResponse(), "1", "2", "3", "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[3]; - assertHitCount(item.getResponse(), 1L); - assertSearchHits(item.getResponse(), "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[4]; - assertHitCount(item.getResponse(), 2L); - assertSearchHits(item.getResponse(), "2", "4"); - assertThat(item.getFailureMessage(), nullValue()); - - item = response.getResponses()[5]; - assertThat(item.getResponse(), nullValue()); - assertThat(item.getFailureMessage(), notNullValue()); - assertThat(item.getFailureMessage(), containsString("[test/6] couldn't be found")); + .add(prepareSearch("test").setQuery(new PercolateQueryBuilder("query", "test", "5", null, null, null))) + .add( + prepareSearch("test") // non-existent doc, so error element + .setQuery(new PercolateQueryBuilder("query", "test", "6", null, null, null)) + ), + response -> { + Item item = response.getResponses()[0]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "1", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[1]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "2", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[2]; + assertHitCount(item.getResponse(), 4L); + assertSearchHits(item.getResponse(), "1", "2", "3", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[3]; + assertHitCount(item.getResponse(), 1L); + assertSearchHits(item.getResponse(), "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[4]; + assertHitCount(item.getResponse(), 2L); + assertSearchHits(item.getResponse(), "2", "4"); + assertThat(item.getFailureMessage(), nullValue()); + + item = response.getResponses()[5]; + assertThat(item.getResponse(), nullValue()); + assertThat(item.getFailureMessage(), notNullValue()); + assertThat(item.getFailureMessage(), containsString("[test/6] couldn't be found")); + } + ); } public void testDisallowExpensiveQueries() throws IOException { @@ -1145,18 +1177,18 @@ public void testDisallowExpensiveQueries() throws IOException { indicesAdmin().prepareCreate("test").setMapping("id", "type=keyword", "field1", "type=keyword", "query", "type=percolator") ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("query", matchQuery("field1", "value")).endObject()) .get(); refresh(); // Execute with search.allow_expensive_queries = null => default value = true => success BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()); - SearchResponse response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); -
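The testDisallowExpensiveQueries hunk that begins here leans on percolate queries being classified as expensive: with search.allow_expensive_queries unset, the default of true lets the search succeed, and the test then flips the setting to false and expects a failure. A simplified, hypothetical sketch of the kind of guard involved (the real check sits inside PercolateQueryBuilder, and the exact exception type and message may differ):

    final class ExpensiveQueryGuard {
        // Simplified stand-in for the check an "expensive" query runs before executing.
        static void checkAllowExpensiveQueries(boolean allowExpensiveQueries) {
            if (allowExpensiveQueries == false) {
                throw new IllegalArgumentException(
                    "[percolate] queries cannot be executed when 'search.allow_expensive_queries' is set to false."
                );
            }
        }
    }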
assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + }); // Set search.allow_expensive_queries to "false" => assert failure updateClusterSettings(Settings.builder().put("search.allow_expensive_queries", false)); @@ -1173,10 +1205,11 @@ public void testDisallowExpensiveQueries() throws IOException { // Set search.allow_expensive_queries setting to "true" ==> success updateClusterSettings(Settings.builder().put("search.allow_expensive_queries", true)); - response = prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + assertResponse(prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); + }); } finally { updateClusterSettings(Settings.builder().putNull("search.allow_expensive_queries")); } @@ -1186,49 +1219,50 @@ public void testWrappedWithConstantScore() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("d", "type=date", "q", "type=percolator")); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("q", boolQuery().must(rangeQuery("d").gt("now"))).endObject()) - .execute() - .actionGet(); + .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("q", boolQuery().must(rangeQuery("d").lt("now"))).endObject()) - .execute() - .actionGet(); + .get(); indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q", - BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q", - BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON - ) - ).addSort("_doc", SortOrder.ASC).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q", + BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), + XContentType.JSON + ) + ), + 1 + ); - response = prepareSearch("test").setQuery( - constantScoreQuery( + assertHitCount( + prepareSearch("test").setQuery( new PercolateQueryBuilder( "q", BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), XContentType.JSON ) - ) - ).get(); - assertEquals(1, 
response.getHits().getTotalHits().value); + ).addSort("_doc", SortOrder.ASC), + 1 + ); + assertHitCount( + prepareSearch("test").setQuery( + constantScoreQuery( + new PercolateQueryBuilder( + "q", + BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), + XContentType.JSON + ) + ) + ), + 1 + ); } public void testWithWildcardFieldNames() throws Exception { @@ -1248,8 +1282,7 @@ public void testWithWildcardFieldNames() throws Exception { ) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("q_simple", simpleQueryStringQuery("yada").fields(Map.of("text*", 1f))) @@ -1259,44 +1292,51 @@ public void testWithWildcardFieldNames() throws Exception { .endObject() ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .execute() - .actionGet(); - - SearchResponse response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_simple", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_string", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_match", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = prepareSearch("test").setQuery( - new PercolateQueryBuilder( - "q_combo", - BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), - XContentType.JSON - ) - ).get(); - assertEquals(1, response.getHits().getTotalHits().value); + .get(); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_simple", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_string", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_match", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); + + assertHitCount( + prepareSearch("test").setQuery( + new PercolateQueryBuilder( + "q_combo", + BytesReference.bytes(jsonBuilder().startObject().field("text_1", "yada").endObject()), + XContentType.JSON + ) + ), + 1 + ); } public void testKnnQueryNotSupportedInPercolator() throws IOException { @@ -1320,8 +1360,7 @@ public void testKnnQueryNotSupportedInPercolator() throws IOException { ensureGreen(); QueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder("my_vector", new float[] { 1, 1, 1, 1, 1 }, 10, null); - IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index1") - .setId("knn_query1") + IndexRequestBuilder indexRequestBuilder = prepareIndex("index1").setId("knn_query1") .setSource(jsonBuilder().startObject().field("my_query", knnVectorQueryBuilder).endObject()); DocumentParsingException exception = 
expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get()); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 65b2e257de0b1..05a935229246d 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -84,6 +84,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentFactory; @@ -215,93 +216,95 @@ public void testDuel() throws Exception { } Collections.sort(intValues); - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - MappedFieldType intFieldType = mapperService.fieldType("int_field"); - - List> queryFunctions = new ArrayList<>(); - queryFunctions.add(MatchNoDocsQuery::new); - queryFunctions.add(MatchAllDocsQuery::new); - queryFunctions.add(() -> new TermQuery(new Term("unknown_field", "value"))); - String field1 = randomFrom(stringFields); - queryFunctions.add(() -> new TermQuery(new Term(field1, randomFrom(stringContent.get(field1))))); - String field2 = randomFrom(stringFields); - queryFunctions.add(() -> new TermQuery(new Term(field2, randomFrom(stringContent.get(field2))))); - queryFunctions.add(() -> intFieldType.termQuery(randomFrom(intValues), context)); - queryFunctions.add(() -> intFieldType.termsQuery(Arrays.asList(randomFrom(intValues), randomFrom(intValues)), context)); - queryFunctions.add( - () -> intFieldType.rangeQuery( - intValues.get(4), - intValues.get(intValues.size() - 4), - true, - true, - ShapeRelation.WITHIN, - null, - null, - context - ) - ); - queryFunctions.add( - () -> new TermInSetQuery( - field1, - new BytesRef(randomFrom(stringContent.get(field1))), - new BytesRef(randomFrom(stringContent.get(field1))) - ) - ); - queryFunctions.add( - () -> new TermInSetQuery( - field2, - new BytesRef(randomFrom(stringContent.get(field1))), - new BytesRef(randomFrom(stringContent.get(field1))) - ) - ); - // many iterations with boolean queries, which are the most complex queries to deal with when nested - int numRandomBoolQueries = 1000; - for (int i = 0; i < numRandomBoolQueries; i++) { - queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues, context)); - } - queryFunctions.add(() -> { - int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); - List clauses = new ArrayList<>(); - for (int i = 0; i < numClauses; i++) { - String field = randomFrom(stringFields); - clauses.add(new TermQuery(new Term(field, randomFrom(stringContent.get(field))))); - } - return new DisjunctionMaxQuery(clauses, 0.01f); - }); - queryFunctions.add(() -> { - Float minScore = randomBoolean() ? 
null : (float) randomIntBetween(1, 1000); - Query innerQuery; - if (randomBoolean()) { - innerQuery = new TermQuery(new Term(field1, randomFrom(stringContent.get(field1)))); - } else { - innerQuery = new PhraseQuery(field1, randomFrom(stringContent.get(field1)), randomFrom(stringContent.get(field1))); + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + MappedFieldType intFieldType = mapperService.fieldType("int_field"); + + List> queryFunctions = new ArrayList<>(); + queryFunctions.add(MatchNoDocsQuery::new); + queryFunctions.add(MatchAllDocsQuery::new); + queryFunctions.add(() -> new TermQuery(new Term("unknown_field", "value"))); + String field1 = randomFrom(stringFields); + queryFunctions.add(() -> new TermQuery(new Term(field1, randomFrom(stringContent.get(field1))))); + String field2 = randomFrom(stringFields); + queryFunctions.add(() -> new TermQuery(new Term(field2, randomFrom(stringContent.get(field2))))); + queryFunctions.add(() -> intFieldType.termQuery(randomFrom(intValues), context)); + queryFunctions.add(() -> intFieldType.termsQuery(Arrays.asList(randomFrom(intValues), randomFrom(intValues)), context)); + queryFunctions.add( + () -> intFieldType.rangeQuery( + intValues.get(4), + intValues.get(intValues.size() - 4), + true, + true, + ShapeRelation.WITHIN, + null, + null, + context + ) + ); + queryFunctions.add( + () -> new TermInSetQuery( + field1, + new BytesRef(randomFrom(stringContent.get(field1))), + new BytesRef(randomFrom(stringContent.get(field1))) + ) + ); + queryFunctions.add( + () -> new TermInSetQuery( + field2, + new BytesRef(randomFrom(stringContent.get(field1))), + new BytesRef(randomFrom(stringContent.get(field1))) + ) + ); + // many iterations with boolean queries, which are the most complex queries to deal with when nested + int numRandomBoolQueries = 1000; + for (int i = 0; i < numRandomBoolQueries; i++) { + queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues, context)); } - return new FunctionScoreQuery(innerQuery, minScore, 1f); - }); - - List documents = new ArrayList<>(); - for (Supplier queryFunction : queryFunctions) { - Query query = queryFunction.get(); - addQuery(query, documents); - } + queryFunctions.add(() -> { + int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); + List clauses = new ArrayList<>(); + for (int i = 0; i < numClauses; i++) { + String field = randomFrom(stringFields); + clauses.add(new TermQuery(new Term(field, randomFrom(stringContent.get(field))))); + } + return new DisjunctionMaxQuery(clauses, 0.01f); + }); + queryFunctions.add(() -> { + Float minScore = randomBoolean() ? null : (float) randomIntBetween(1, 1000); + Query innerQuery; + if (randomBoolean()) { + innerQuery = new TermQuery(new Term(field1, randomFrom(stringContent.get(field1)))); + } else { + innerQuery = new PhraseQuery(field1, randomFrom(stringContent.get(field1)), randomFrom(stringContent.get(field1))); + } + return new FunctionScoreQuery(innerQuery, minScore, 1f); + }); - indexWriter.addDocuments(documents); - indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - // Disable query cache, because ControlQuery cannot be cached... 
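The other recurring change in CandidateQueryTests is resource scoping: the SearchContext obtained from createSearchContext(indexService) is now opened in a try-with-resources block, so it is released even when duelRun fails an assertion partway through. Restated with the test's own identifiers (SearchContext is a Releasable and therefore an AutoCloseable):

    try (SearchContext searchContext = createSearchContext(indexService)) {
        SearchExecutionContext context = searchContext.getSearchExecutionContext();
        // build queries against `context` and run the duel; the context is closed
        // on every exit path instead of leaking when an assertion throws
    }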
- shardSearcher.setQueryCache(null); + List documents = new ArrayList<>(); + for (Supplier queryFunction : queryFunctions) { + Query query = queryFunction.get(); + addQuery(query, documents); + } - LuceneDocument document = new LuceneDocument(); - for (Map.Entry> entry : stringContent.entrySet()) { - String value = entry.getValue().stream().collect(Collectors.joining(" ")); - document.add(new TextField(entry.getKey(), value, Field.Store.NO)); - } - for (Integer intValue : intValues) { - NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", intValue, true, true, false); + indexWriter.addDocuments(documents); + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + // Disable query cache, because ControlQuery cannot be cached... + shardSearcher.setQueryCache(null); + + LuceneDocument document = new LuceneDocument(); + for (Map.Entry> entry : stringContent.entrySet()) { + String value = entry.getValue().stream().collect(Collectors.joining(" ")); + document.add(new TextField(entry.getKey(), value, Field.Store.NO)); + } + for (Integer intValue : intValues) { + NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", intValue, true, true, false); + } + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); } - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); } private BooleanQuery createRandomBooleanQuery( @@ -376,53 +379,55 @@ public void testDuel2() throws Exception { ranges.add(new int[] { 0, 10 }); ranges.add(new int[] { 15, 50 }); - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - List documents = new ArrayList<>(); - { - addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents); - } - { - addQuery(new PhraseQuery(0, "string_field", stringValues.toArray(new String[0])), documents); - } - { - int[] range = randomFrom(ranges); - Query rangeQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, context); - addQuery(rangeQuery, documents); - } - { - int numBooleanQueries = randomIntBetween(1, 5); - for (int i = 0; i < numBooleanQueries; i++) { - Query randomBQ = randomBQ(1, stringValues, ranges, intFieldType, context); - addQuery(randomBQ, documents); + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + List documents = new ArrayList<>(); + { + addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents); + } + { + addQuery(new PhraseQuery(0, "string_field", stringValues.toArray(new String[0])), documents); + } + { + int[] range = randomFrom(ranges); + Query rangeQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, context); + addQuery(rangeQuery, documents); + } + { + int numBooleanQueries = randomIntBetween(1, 5); + for (int i = 0; i < numBooleanQueries; i++) { + Query randomBQ = randomBQ(1, stringValues, ranges, intFieldType, context); + addQuery(randomBQ, documents); + } + } + { + addQuery(new MatchNoDocsQuery(), documents); + } + { + addQuery(new MatchAllDocsQuery(), documents); } - } - { - addQuery(new MatchNoDocsQuery(), documents); - } - { - addQuery(new MatchAllDocsQuery(), documents); - } - - indexWriter.addDocuments(documents); - 
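Both duel tests share one loop: register the candidate queries once, then percolate each synthetic document through a single-document Lucene MemoryIndex and compare the percolator's verdict against a control query. A self-contained sketch of that MemoryIndex round trip in plain Lucene (no Elasticsearch types involved):

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.memory.MemoryIndex;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TermQuery;

    final class MemoryIndexSketch {
        public static void main(String[] args) throws Exception {
            // One transient in-memory "document" -- the same role each percolated
            // document plays inside the percolator.
            MemoryIndex memoryIndex = new MemoryIndex();
            memoryIndex.addField("string_field", "the quick brown fox", new WhitespaceAnalyzer());
            IndexSearcher searcher = memoryIndex.createSearcher();
            // Run a candidate query against it; a count of 1 means the query matches.
            int hits = searcher.count(new TermQuery(new Term("string_field", "fox")));
            System.out.println(hits); // prints 1
        }
    }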
indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - // Disable query cache, because ControlQuery cannot be cached... - shardSearcher.setQueryCache(null); - LuceneDocument document = new LuceneDocument(); - for (String value : stringValues) { - document.add(new TextField("string_field", value, Field.Store.NO)); - logger.info("Test with document: {}" + document); - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); - } + indexWriter.addDocuments(documents); + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + // Disable query cache, because ControlQuery cannot be cached... + shardSearcher.setQueryCache(null); + + LuceneDocument document = new LuceneDocument(); + for (String value : stringValues) { + document.add(new TextField("string_field", value, Field.Store.NO)); + logger.info("Test with document: {}", document); + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); + } - for (int[] range : ranges) { - NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", between(range[0], range[1]), true, true, false); - logger.info("Test with document: {}" + document); - MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); - duelRun(queryStore, memoryIndex, shardSearcher); + for (int[] range : ranges) { + NumberFieldMapper.NumberType.INTEGER.addFields(document, "int_field", between(range[0], range[1]), true, true, false); + logger.info("Test with document: {}", document); + MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); + duelRun(queryStore, memoryIndex, shardSearcher); + } } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index b47364e3b1a08..46b9e365fd0ea 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -81,6 +81,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.DummyQueryParserPlugin; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.xcontent.XContentBuilder; @@ -268,76 +269,79 @@ public void testExtractTerms() throws Exception { } public void testExtractRanges() throws Exception { - SearchExecutionContext context = createSearchContext(indexService).getSearchExecutionContext(); - addQueryFieldMappings(); - BooleanQuery.Builder bq = new BooleanQuery.Builder(); - Query rangeQuery1 = mapperService.fieldType("number_field1").rangeQuery(10, 20, true, true, null, null, null, context); - bq.add(rangeQuery1, Occur.MUST); - Query rangeQuery2 = mapperService.fieldType("number_field1").rangeQuery(15, 20, true, true, null, null, null, context); - bq.add(rangeQuery2, Occur.MUST); - - DocumentMapper documentMapper = mapperService.documentMapper(); - PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); -
DocumentParserContext documentParserContext = new TestDocumentParserContext(); - fieldMapper.processQuery(bq.build(), documentParserContext); - LuceneDocument document = documentParserContext.doc(); - - PercolatorFieldMapper.PercolatorFieldType percolatorFieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper.fieldType(); - assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); - List fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); - fields.sort(Comparator.comparing(IndexableField::binaryValue)); - assertThat( - fields, - transformedItemsMatch( - b -> b.binaryValue().bytes, - contains( - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) - ), - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(15)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + try (SearchContext searchContext = createSearchContext(indexService)) { + SearchExecutionContext context = searchContext.getSearchExecutionContext(); + addQueryFieldMappings(); + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + Query rangeQuery1 = mapperService.fieldType("number_field1").rangeQuery(10, 20, true, true, null, null, null, context); + bq.add(rangeQuery1, Occur.MUST); + Query rangeQuery2 = mapperService.fieldType("number_field1").rangeQuery(15, 20, true, true, null, null, null, context); + bq.add(rangeQuery2, Occur.MUST); + + DocumentMapper documentMapper = mapperService.documentMapper(); + PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); + DocumentParserContext documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + LuceneDocument document = documentParserContext.doc(); + + PercolatorFieldMapper.PercolatorFieldType percolatorFieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper + .fieldType(); + assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); + List fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); + fields.sort(Comparator.comparing(IndexableField::binaryValue)); + assertThat( + fields, + transformedItemsMatch( + b -> b.binaryValue().bytes, + contains( + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ), + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(15)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ) ) ) - ) - ); - - fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); - assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(1L))); - - // Range queries on different fields: - bq = new BooleanQuery.Builder(); - bq.add(rangeQuery1, Occur.MUST); - rangeQuery2 = mapperService.fieldType("number_field2").rangeQuery(15, 20, true, true, null, null, null, context); - bq.add(rangeQuery2, Occur.MUST); - - documentParserContext = new TestDocumentParserContext(); - fieldMapper.processQuery(bq.build(), documentParserContext); - document = documentParserContext.doc(); + ); - assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); - fields = new 
ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); - fields.sort(Comparator.comparing(IndexableField::binaryValue)); - assertThat( - fields, - transformedItemsMatch( - b -> b.binaryValue().bytes, - contains( - allOf( - transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), - transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) - ), - allOf( - transformedMatch(b -> LongPoint.decodeDimension(b, 8), equalTo(15L)), - transformedMatch(b -> LongPoint.decodeDimension(b, 24), equalTo(20L)) + fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); + assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(1L))); + + // Range queries on different fields: + bq = new BooleanQuery.Builder(); + bq.add(rangeQuery1, Occur.MUST); + rangeQuery2 = mapperService.fieldType("number_field2").rangeQuery(15, 20, true, true, null, null, null, context); + bq.add(rangeQuery2, Occur.MUST); + + documentParserContext = new TestDocumentParserContext(); + fieldMapper.processQuery(bq.build(), documentParserContext); + document = documentParserContext.doc(); + + assertThat(document.getField(percolatorFieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); + fields = new ArrayList<>(document.getFields(percolatorFieldType.rangeField.name())); + fields.sort(Comparator.comparing(IndexableField::binaryValue)); + assertThat( + fields, + transformedItemsMatch( + b -> b.binaryValue().bytes, + contains( + allOf( + transformedMatch(b -> IntPoint.decodeDimension(b, 12), equalTo(10)), + transformedMatch(b -> IntPoint.decodeDimension(b, 28), equalTo(20)) + ), + allOf( + transformedMatch(b -> LongPoint.decodeDimension(b, 8), equalTo(15L)), + transformedMatch(b -> LongPoint.decodeDimension(b, 24), equalTo(20L)) + ) ) ) - ) - ); + ); - fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); - assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(2L))); + fields = new ArrayList<>(document.getFields(percolatorFieldType.minimumShouldMatchField.name())); + assertThat(fields, transformedItemsMatch(IndexableField::numericValue, contains(2L))); + } } public void testExtractTermsAndRanges_failed() throws Exception { @@ -616,7 +620,7 @@ public void testStoringQueries() throws Exception { public void testQueryWithRewrite() throws Exception { addQueryFieldMappings(); - client().prepareIndex("remote").setId("1").setSource("field", "value").get(); + prepareIndex("remote").setId("1").setSource("field", "value").get(); QueryBuilder queryBuilder = termsLookupQuery("field", new TermsLookup("remote", "1", "field")); ParsedDocument doc = mapperService.documentMapper() .parse( diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java index 05c2c27de40fc..5f3ff5264497a 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -73,8 +73,7 @@ protected Map, Object>> pluginScripts() { public void testPercolateScriptQuery() throws IOException { indicesAdmin().prepareCreate("index").setMapping("query", "type=percolator").get(); - client().prepareIndex("index") - .setId("1") + prepareIndex("index").setId("1") .setSource( jsonBuilder().startObject() .field( @@ -84,8 
+83,7 @@ public void testPercolateScriptQuery() throws IOException { .endObject() ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .execute() - .actionGet(); + .get(); assertSearchHitsWithoutFailures( client().prepareSearch("index") .setQuery( @@ -126,8 +124,7 @@ public void testPercolateQueryWithNestedDocuments_doNotLeakBitsetCacheEntries() .setSettings(Settings.builder().put(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING.getKey(), false)) .setMapping(mapping) ); - client().prepareIndex("test") - .setId("q1") + prepareIndex("test").setId("q1") .setSource( jsonBuilder().startObject() .field( @@ -215,8 +212,7 @@ public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() mapping.endObject(); createIndex("test", indicesAdmin().prepareCreate("test").setMapping(mapping)); Script script = new Script(ScriptType.INLINE, MockScriptPlugin.NAME, "use_fielddata_please", Collections.emptyMap()); - client().prepareIndex("test") - .setId("q1") + prepareIndex("test").setId("q1") .setSource( jsonBuilder().startObject() .field("query", QueryBuilders.nestedQuery("employees", QueryBuilders.scriptQuery(script), ScoreMode.Avg)) @@ -258,8 +254,7 @@ public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() public void testMapUnmappedFieldAsText() throws IOException { Settings.Builder settings = Settings.builder().put("index.percolator.map_unmapped_fields_as_text", true); createIndex("test", settings.build(), "query", "query", "type=percolator"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()) .get(); indicesAdmin().prepareRefresh().get(); @@ -290,12 +285,10 @@ public void testRangeQueriesWithNow() throws Exception { "type=percolator" ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("query", rangeQuery("field2").from("now-1h").to("now+1h")).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field( @@ -307,8 +300,7 @@ public void testRangeQueriesWithNow() throws Exception { .get(); Script script = new Script(ScriptType.INLINE, MockScriptPlugin.NAME, "1==1", Collections.emptyMap()); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("query", boolQuery().filter(scriptQuery(script)).filter(rangeQuery("field2").from("now-1h").to("now+1h"))) diff --git a/modules/rank-eval/src/internalClusterTest/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/internalClusterTest/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index 7e879d9959f6d..699cb307e3310 100644 --- a/modules/rank-eval/src/internalClusterTest/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -49,18 +49,15 @@ public void setup() { createIndex(TEST_INDEX); ensureGreen(); - client().prepareIndex(TEST_INDEX) - .setId("1") - .setSource("id", 1, "text", "berlin", "title", "Berlin, Germany", "population", 3670622) - .get(); - client().prepareIndex(TEST_INDEX).setId("2").setSource("id", 2, "text", "amsterdam", "population", 851573).get(); - client().prepareIndex(TEST_INDEX).setId("3").setSource("id", 3, "text", "amsterdam", "population", 851573).get(); - 
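Nearly every test hunk in this patch makes the same mechanical substitution: client().prepareIndex(index) becomes a bare prepareIndex(index) inherited from the test base class. The helper is presumably just a thin wrapper, along these lines (the exact signature in ESIntegTestCase is an assumption inferred from the call sites in this diff):

    // Hypothetical shape of the base-class convenience method the new call sites use.
    protected static IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index);
    }

With it, client().prepareIndex("test").setId("1").setSource("foo", "a") shortens to prepareIndex("test").setId("1").setSource("foo", "a"), which is the whole of the mechanical migration in these files.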
client().prepareIndex(TEST_INDEX).setId("4").setSource("id", 4, "text", "amsterdam", "population", 851573).get(); - client().prepareIndex(TEST_INDEX).setId("5").setSource("id", 5, "text", "amsterdam", "population", 851573).get(); - client().prepareIndex(TEST_INDEX).setId("6").setSource("id", 6, "text", "amsterdam", "population", 851573).get(); + prepareIndex(TEST_INDEX).setId("1").setSource("id", 1, "text", "berlin", "title", "Berlin, Germany", "population", 3670622).get(); + prepareIndex(TEST_INDEX).setId("2").setSource("id", 2, "text", "amsterdam", "population", 851573).get(); + prepareIndex(TEST_INDEX).setId("3").setSource("id", 3, "text", "amsterdam", "population", 851573).get(); + prepareIndex(TEST_INDEX).setId("4").setSource("id", 4, "text", "amsterdam", "population", 851573).get(); + prepareIndex(TEST_INDEX).setId("5").setSource("id", 5, "text", "amsterdam", "population", 851573).get(); + prepareIndex(TEST_INDEX).setId("6").setSource("id", 6, "text", "amsterdam", "population", 851573).get(); // add another index for testing closed indices etc... - client().prepareIndex("test2").setId("7").setSource("id", 7, "text", "amsterdam", "population", 851573).get(); + prepareIndex("test2").setId("7").setSource("id", 7, "text", "amsterdam", "population", 851573).get(); refresh(); // set up an alias that can also be used in tests diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 50284008eef48..996fbde85e474 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -272,7 +272,7 @@ private ReindexRequestBuilder reindexAndPartiallyBlock() throws Exception { false, true, IntStream.range(0, numDocs) - .mapToObj(i -> client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("n", Integer.toString(i))) + .mapToObj(i -> prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("n", Integer.toString(i))) .collect(Collectors.toList()) ); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java index d7f71fcc510ab..7dad062ab3bca 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -53,6 +52,7 @@ import static org.elasticsearch.common.lucene.uid.Versions.MATCH_DELETED; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -181,7 +181,7 @@ public void testDeleteByQuery() throws Exception { source.put(RETURN_NOOP_FIELD, true); noopDocs++; } - indexRequests.add(client().prepareIndex(sourceIndex).setId(Integer.toString(i)).setSource(source)); + indexRequests.add(prepareIndex(sourceIndex).setId(Integer.toString(i)).setSource(source)); } indexRandom(true, indexRequests); @@ -201,16 +201,18 @@ public void testDeleteByQuery() throws Exception { // Ensure that the write thread blocking task is currently executing barrier.await(); - final SearchResponse searchResponse = prepareSearch(sourceIndex).setSize(numDocs) // Get all indexed docs - .addSort(SORTING_FIELD, SortOrder.DESC) - .execute() - .actionGet(); - - // Modify a subset of the target documents concurrently - final List originalDocs = Arrays.asList(searchResponse.getHits().getHits()); int conflictingOps = randomIntBetween(maxDocs, numDocs); - final List docsModifiedConcurrently = randomSubsetOf(conflictingOps, originalDocs); - + final int finalConflictingOps = conflictingOps; + final List docsModifiedConcurrently = new ArrayList<>(); + assertResponse( + prepareSearch(sourceIndex).setSize(numDocs) // Get all indexed docs + .addSort(SORTING_FIELD, SortOrder.DESC), + response -> { + // Modify a subset of the target documents concurrently + final List originalDocs = Arrays.asList(response.getHits().getHits()); + docsModifiedConcurrently.addAll(randomSubsetOf(finalConflictingOps, originalDocs)); + } + ); BulkRequest conflictingUpdatesBulkRequest = new BulkRequest(); for (SearchHit searchHit : docsModifiedConcurrently) { if (scriptEnabled && searchHit.getSourceAsMap().containsKey(RETURN_NOOP_FIELD)) { diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java index 2eed369a64d6a..3bbc8e4b969ee 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/AbstractFeatureMigrationIntegTest.java @@ -192,7 +192,7 @@ public void createSystemIndexForDescriptor(SystemIndexDescriptor descriptor) thr List docs = new ArrayList<>(INDEX_DOC_COUNT); for (int i = 0; i < INDEX_DOC_COUNT; i++) { - docs.add(ESIntegTestCase.client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("some_field", "words words")); + docs.add(ESIntegTestCase.prepareIndex(indexName).setId(Integer.toString(i)).setSource("some_field", "words words")); } indexRandom(true, docs); IndicesStatsResponse indexStats = ESIntegTestCase.indicesAdmin().prepareStats(indexName).setDocs(true).get(); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index 14647820e71f6..0c1a0e41206c7 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -396,27 +396,29 @@ private void migrateWithTemplatesV2(String prefix, SystemIndexDescriptor... 
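The BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests hunk above is the shape of a recurring migration in this patch: instead of holding a SearchResponse obtained via actionGet(), the assertions move into a consumer passed to ElasticsearchAssertions.assertResponse, which scopes the ref-counted response to the callback (releasing it on return is the motivation this refactor suggests). Stripped to its essentials:

    // Results are collected outside the lambda, since a lambda cannot write enclosing locals.
    final List<SearchHit> docsModifiedConcurrently = new ArrayList<>();
    final int finalConflictingOps = conflictingOps;   // effectively-final copy for capture
    assertResponse(
        prepareSearch(sourceIndex).setSize(numDocs).addSort(SORTING_FIELD, SortOrder.DESC),
        response -> docsModifiedConcurrently.addAll(
            randomSubsetOf(finalConflictingOps, Arrays.asList(response.getHits().getHits()))
        )
    );

The effectively-final dance (finalConflictingOps, the pre-declared list) exists only to satisfy lambda capture rules; the query itself is unchanged.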
desc ); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("a-ct").componentTemplate(ct)).get(); - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList(prefix + "*"), - new Template( - null, - new CompressedXContent( - "{\n" - + " \"dynamic\": false,\n" - + " \"properties\": {\n" - + " \"field2\": {\n" - + " \"type\": \"keyword\"\n" - + " }\n" - + " }\n" - + " }" - ), - null - ), - Collections.singletonList("a-ct"), - 4L, - 5L, - Collections.singletonMap("baz", "thud") - ); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(prefix + "*")) + .template( + new Template( + null, + new CompressedXContent( + "{\n" + + " \"dynamic\": false,\n" + + " \"properties\": {\n" + + " \"field2\": {\n" + + " \"type\": \"keyword\"\n" + + " }\n" + + " }\n" + + " }" + ), + null + ) + ) + .componentTemplates(Collections.singletonList("a-ct")) + .priority(4L) + .version(5L) + .metadata(Collections.singletonMap("baz", "thud")) + .build(); client().execute(PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("a-it").indexTemplate(cit)) .get(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java index 952dd0585e7ba..8e7fab68ac697 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest; @@ -64,9 +64,9 @@ protected RestChannelConsumer doPrepareRequest(RestRequest request, NodeClient c if (validationException != null) { throw validationException; } - final var responseFuture = new ListenableActionFuture(); - final var task = client.executeLocally(action, internal, responseFuture); - responseFuture.addListener(new LoggingTaskListener<>(task)); + final var responseListener = new SubscribableListener(); + final var task = client.executeLocally(action, internal, responseListener); + responseListener.addListener(new LoggingTaskListener<>(task)); return sendTask(client.getLocalNodeId(), task); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java index 35ad5fe9532cd..b211f7d92f51f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java @@ -97,7 +97,7 @@ private void testCancel( false, true, IntStream.range(0, numDocs) - .mapToObj(i -> client().prepareIndex().setIndex(INDEX).setId(String.valueOf(i)).setSource("n", i)) + .mapToObj(i -> prepareIndex(INDEX).setId(String.valueOf(i)).setSource("n", i)) .collect(Collectors.toList()) ); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java 
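The AbstractBaseReindexRestHandler hunk above swaps ListenableActionFuture for SubscribableListener as the completion hook handed to executeLocally; subscribers are attached after the task starts, exactly as before. The pattern in isolation (the BulkByScrollResponse type parameter is assumed, since this rendering of the diff drops generics):

    // A SubscribableListener is an ActionListener that fans completion out to
    // listeners registered via addListener, whether they subscribe before or
    // after the result arrives.
    final var responseListener = new SubscribableListener<BulkByScrollResponse>();
    final var task = client.executeLocally(action, internal, responseListener);
    responseListener.addListener(new LoggingTaskListener<>(task));
    return sendTask(client.getLocalNodeId(), task);

Since nothing ever blocked on the old future here, the swap looks behavior-preserving; only the utility class changes.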
b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index c7c441e3eaff9..0ad1867e75058 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -14,11 +14,11 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.bulk.BackoffPolicy; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.client.internal.support.AbstractClient; @@ -102,35 +102,39 @@ private void dotestBasicsWithRetry(int retries, int minFailures, int maxFailures hitSource.start(); for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) { - client.fail(SearchAction.INSTANCE, new EsRejectedExecutionException()); + client.fail(TransportSearchAction.TYPE, new EsRejectedExecutionException()); if (retry >= retries) { return; } client.awaitOperation(); ++expectedSearchRetries; } - client.validateRequest(SearchAction.INSTANCE, (SearchRequest r) -> assertTrue(r.allowPartialSearchResults() == Boolean.FALSE)); + client.validateRequest(TransportSearchAction.TYPE, (SearchRequest r) -> assertTrue(r.allowPartialSearchResults() == Boolean.FALSE)); SearchResponse searchResponse = createSearchResponse(); - client.respond(SearchAction.INSTANCE, searchResponse); - - for (int i = 0; i < randomIntBetween(1, 10); ++i) { - ScrollableHitSource.AsyncResponse asyncResponse = responses.poll(10, TimeUnit.SECONDS); - assertNotNull(asyncResponse); - assertEquals(responses.size(), 0); - assertSameHits(asyncResponse.response().getHits(), searchResponse.getHits().getHits()); - asyncResponse.done(TimeValue.ZERO); - - for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) { - client.fail(SearchScrollAction.INSTANCE, new EsRejectedExecutionException()); - client.awaitOperation(); - ++expectedSearchRetries; + try { + client.respond(TransportSearchAction.TYPE, searchResponse); + + for (int i = 0; i < randomIntBetween(1, 10); ++i) { + ScrollableHitSource.AsyncResponse asyncResponse = responses.poll(10, TimeUnit.SECONDS); + assertNotNull(asyncResponse); + assertEquals(responses.size(), 0); + assertSameHits(asyncResponse.response().getHits(), searchResponse.getHits().getHits()); + asyncResponse.done(TimeValue.ZERO); + + for (int retry = 0; retry < randomIntBetween(minFailures, maxFailures); ++retry) { + client.fail(TransportSearchScrollAction.TYPE, new EsRejectedExecutionException()); + client.awaitOperation(); + ++expectedSearchRetries; + } + + searchResponse = createSearchResponse(); + client.respond(TransportSearchScrollAction.TYPE, searchResponse); } - searchResponse = createSearchResponse(); - client.respond(SearchScrollAction.INSTANCE, searchResponse); + assertEquals(actualSearchRetries.get(), expectedSearchRetries); + } finally { + searchResponse.decRef(); } - - assertEquals(actualSearchRetries.get(), expectedSearchRetries); } public void 
testScrollKeepAlive() { @@ -150,7 +154,10 @@ public void testScrollKeepAlive() { ); hitSource.startNextScroll(timeValueSeconds(100)); - client.validateRequest(SearchScrollAction.INSTANCE, (SearchScrollRequest r) -> assertEquals(r.scroll().keepAlive().seconds(), 110)); + client.validateRequest( + TransportSearchScrollAction.TYPE, + (SearchScrollRequest r) -> assertEquals(r.scroll().keepAlive().seconds(), 110) + ); } private SearchResponse createSearchResponse() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java index 2f2248e304989..fac18c4f6f544 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java @@ -52,13 +52,13 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testBasics() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("foo", "a"), - client().prepareIndex("test").setId("2").setSource("foo", "a"), - client().prepareIndex("test").setId("3").setSource("foo", "b"), - client().prepareIndex("test").setId("4").setSource("foo", "c"), - client().prepareIndex("test").setId("5").setSource("foo", "d"), - client().prepareIndex("test").setId("6").setSource("foo", "e"), - client().prepareIndex("test").setId("7").setSource("foo", "f") + prepareIndex("test").setId("1").setSource("foo", "a"), + prepareIndex("test").setId("2").setSource("foo", "a"), + prepareIndex("test").setId("3").setSource("foo", "b"), + prepareIndex("test").setId("4").setSource("foo", "c"), + prepareIndex("test").setId("5").setSource("foo", "d"), + prepareIndex("test").setId("6").setSource("foo", "e"), + prepareIndex("test").setId("7").setSource("foo", "f") ); assertHitCount(prepareSearch("test").setSize(0), 7); @@ -87,7 +87,7 @@ public void testDeleteByQueryWithOneIndex() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < docs; i++) { - builders.add(client().prepareIndex("test").setId(String.valueOf(i)).setSource("fields1", 1)); + builders.add(prepareIndex("test").setId(String.valueOf(i)).setSource("fields1", 1)); } indexRandom(true, true, true, builders); @@ -112,7 +112,7 @@ public void testDeleteByQueryWithMultipleIndices() throws Exception { for (int j = 0; j < docs; j++) { boolean candidate = (j < candidates[i]); - builders.add(client().prepareIndex("test-" + i).setId(String.valueOf(j)).setSource("candidate", candidate)); + builders.add(prepareIndex("test-" + i).setId(String.valueOf(j)).setSource("candidate", candidate)); } } indexRandom(true, true, true, builders); @@ -129,7 +129,7 @@ public void testDeleteByQueryWithMultipleIndices() throws Exception { } public void testDeleteByQueryWithMissingIndex() throws Exception { - indexRandom(true, client().prepareIndex("test").setId("1").setSource("foo", "a")); + indexRandom(true, prepareIndex("test").setId("1").setSource("foo", "a")); assertHitCount(prepareSearch().setSize(0), 1); try { @@ -149,7 +149,7 @@ public void testDeleteByQueryWithRouting() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < docs; i++) { - builders.add(client().prepareIndex("test").setId(String.valueOf(i)).setRouting(String.valueOf(i)).setSource("field1", 1)); + builders.add(prepareIndex("test").setId(String.valueOf(i)).setRouting(String.valueOf(i)).setSource("field1", 1)); } indexRandom(true, true, true, builders); @@ -177,10 +177,7 @@ public void 
testDeleteByMatchQuery() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < docs; i++) { builders.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setRouting(randomAlphaOfLengthBetween(1, 5)) - .setSource("foo", "bar") + prepareIndex("test").setId(Integer.toString(i)).setRouting(randomAlphaOfLengthBetween(1, 5)).setSource("foo", "bar") ); } indexRandom(true, true, true, builders); @@ -196,7 +193,7 @@ public void testDeleteByMatchQuery() throws Exception { } public void testDeleteByQueryWithDateMath() throws Exception { - indexRandom(true, client().prepareIndex("test").setId("1").setSource("d", "2013-01-01")); + indexRandom(true, prepareIndex("test").setId("1").setSource("d", "2013-01-01")); DeleteByQueryRequestBuilder delete = deleteByQuery().source("test").filter(rangeQuery("d").to("now-1h")); assertThat(delete.refresh(true).get(), matcher().deleted(1L)); @@ -210,7 +207,7 @@ public void testDeleteByQueryOnReadOnlyIndex() throws Exception { final int docs = randomIntBetween(1, 50); List builders = new ArrayList<>(); for (int i = 0; i < docs; i++) { - builders.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1)); + builders.add(prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1)); } indexRandom(true, true, true, builders); @@ -233,7 +230,7 @@ public void testDeleteByQueryOnReadOnlyAllowDeleteIndex() throws Exception { final int docs = randomIntBetween(1, 50); List builders = new ArrayList<>(); for (int i = 0; i < docs; i++) { - builders.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1)); + builders.add(prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1)); } indexRandom(true, true, true, builders); @@ -289,13 +286,13 @@ public void testDeleteByQueryOnReadOnlyAllowDeleteIndex() throws Exception { public void testSlices() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("foo", "a"), - client().prepareIndex("test").setId("2").setSource("foo", "a"), - client().prepareIndex("test").setId("3").setSource("foo", "b"), - client().prepareIndex("test").setId("4").setSource("foo", "c"), - client().prepareIndex("test").setId("5").setSource("foo", "d"), - client().prepareIndex("test").setId("6").setSource("foo", "e"), - client().prepareIndex("test").setId("7").setSource("foo", "f") + prepareIndex("test").setId("1").setSource("foo", "a"), + prepareIndex("test").setId("2").setSource("foo", "a"), + prepareIndex("test").setId("3").setSource("foo", "b"), + prepareIndex("test").setId("4").setSource("foo", "c"), + prepareIndex("test").setId("5").setSource("foo", "d"), + prepareIndex("test").setId("6").setSource("foo", "e"), + prepareIndex("test").setId("7").setSource("foo", "f") ); assertHitCount(prepareSearch("test").setSize(0), 7); @@ -326,7 +323,7 @@ public void testMultipleSources() throws Exception { docs.put(indexName, new ArrayList<>()); int numDocs = between(5, 15); for (int i = 0; i < numDocs; i++) { - docs.get(indexName).add(client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("foo", "a")); + docs.get(indexName).add(prepareIndex(indexName).setId(Integer.toString(i)).setSource("foo", "a")); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java index 81d00d98b1fec..323b829fe93ff 100644 --- 
a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java @@ -32,7 +32,7 @@ public void testConcurrentDeleteByQueriesOnDifferentDocs() throws Throwable { List builders = new ArrayList<>(); for (int i = 0; i < docs; i++) { for (int t = 0; t < threads.length; t++) { - builders.add(client().prepareIndex("test").setSource("field", t)); + builders.add(prepareIndex("test").setSource("field", t)); } } indexRandom(true, true, true, builders); @@ -73,7 +73,7 @@ public void testConcurrentDeleteByQueriesOnSameDocs() throws Throwable { List builders = new ArrayList<>(); for (int i = 0; i < docs; i++) { - builders.add(client().prepareIndex("test").setId(String.valueOf(i)).setSource("foo", "bar")); + builders.add(prepareIndex("test").setId(String.valueOf(i)).setSource("foo", "bar")); } indexRandom(true, true, true, builders); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexBasicTests.java index 45ca5a536f34f..21f6427dcb632 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexBasicTests.java @@ -30,10 +30,10 @@ public class ReindexBasicTests extends ReindexTestCase { public void testFiltering() throws Exception { indexRandom( true, - client().prepareIndex("source").setId("1").setSource("foo", "a"), - client().prepareIndex("source").setId("2").setSource("foo", "a"), - client().prepareIndex("source").setId("3").setSource("foo", "b"), - client().prepareIndex("source").setId("4").setSource("foo", "c") + prepareIndex("source").setId("1").setSource("foo", "a"), + prepareIndex("source").setId("2").setSource("foo", "a"), + prepareIndex("source").setId("3").setSource("foo", "b"), + prepareIndex("source").setId("4").setSource("foo", "c") ); assertHitCount(prepareSearch("source").setSize(0), 4); @@ -63,7 +63,7 @@ public void testCopyMany() throws Exception { List docs = new ArrayList<>(); int max = between(150, 500); for (int i = 0; i < max; i++) { - docs.add(client().prepareIndex("source").setId(Integer.toString(i)).setSource("foo", "a")); + docs.add(prepareIndex("source").setId(Integer.toString(i)).setSource("foo", "a")); } indexRandom(true, docs); @@ -90,7 +90,7 @@ public void testCopyManyWithSlices() throws Exception { List docs = new ArrayList<>(); int max = between(150, 500); for (int i = 0; i < max; i++) { - docs.add(client().prepareIndex("source").setId(Integer.toString(i)).setSource("foo", "a")); + docs.add(prepareIndex("source").setId(Integer.toString(i)).setSource("foo", "a")); } indexRandom(true, docs); @@ -127,7 +127,7 @@ public void testMultipleSources() throws Exception { docs.put(indexName, new ArrayList<>()); int numDocs = between(50, 200); for (int i = 0; i < numDocs; i++) { - docs.get(indexName).add(client().prepareIndex(indexName).setId("id_" + sourceIndex + "_" + i).setSource("foo", "a")); + docs.get(indexName).add(prepareIndex(indexName).setId("id_" + sourceIndex + "_" + i).setSource("foo", "a")); } } @@ -161,10 +161,10 @@ public void testReindexFromComplexDateMathIndexName() throws Exception { String destIndexName = ""; indexRandom( true, - client().prepareIndex(sourceIndexName).setId("1").setSource("foo", "a"), - client().prepareIndex(sourceIndexName).setId("2").setSource("foo", "a"), - 
client().prepareIndex(sourceIndexName).setId("3").setSource("foo", "b"), - client().prepareIndex(sourceIndexName).setId("4").setSource("foo", "c") + prepareIndex(sourceIndexName).setId("1").setSource("foo", "a"), + prepareIndex(sourceIndexName).setId("2").setSource("foo", "a"), + prepareIndex(sourceIndexName).setId("3").setSource("foo", "b"), + prepareIndex(sourceIndexName).setId("4").setSource("foo", "c") ); assertHitCount(prepareSearch(sourceIndexName).setSize(0), 4); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFailureTests.java index 5e868598d165e..1da998831ecc2 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFailureTests.java @@ -37,7 +37,7 @@ public void testFailuresCauseAbortDefault() throws Exception { * Create the destination index such that the copy will cause a mapping * conflict on every request. */ - indexRandom(true, client().prepareIndex("dest").setId("test").setSource("test", 10) /* Its a string in the source! */); + indexRandom(true, prepareIndex("dest").setId("test").setSource("test", 10) /* Its a string in the source! */); indexDocs(100); @@ -59,7 +59,7 @@ public void testFailuresCauseAbortDefault() throws Exception { public void testAbortOnVersionConflict() throws Exception { // Just put something in the way of the copy. - indexRandom(true, client().prepareIndex("dest").setId("1").setSource("test", "test")); + indexRandom(true, prepareIndex("dest").setId("1").setSource("test", "test")); indexDocs(100); @@ -123,10 +123,10 @@ public void testDateMathResolvesSameIndexName() throws Exception { String destIndexName = ""; indexRandom( true, - client().prepareIndex(sourceIndexName).setId("1").setSource("foo", "a"), - client().prepareIndex(sourceIndexName).setId("2").setSource("foo", "a"), - client().prepareIndex(sourceIndexName).setId("3").setSource("foo", "b"), - client().prepareIndex(sourceIndexName).setId("4").setSource("foo", "c") + prepareIndex(sourceIndexName).setId("1").setSource("foo", "a"), + prepareIndex(sourceIndexName).setId("2").setSource("foo", "a"), + prepareIndex(sourceIndexName).setId("3").setSource("foo", "b"), + prepareIndex(sourceIndexName).setId("4").setSource("foo", "c") ); assertHitCount(prepareSearch(sourceIndexName).setSize(0), 4); @@ -140,7 +140,7 @@ public void testDateMathResolvesSameIndexName() throws Exception { private void indexDocs(int count) throws Exception { List docs = new ArrayList<>(count); for (int i = 0; i < count; i++) { - docs.add(client().prepareIndex("source").setId(Integer.toString(i)).setSource("test", "words words")); + docs.add(prepareIndex("source").setId(Integer.toString(i)).setSource("test", "words words")); } indexRandom(true, docs); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java index 5509e44b52a3e..a68c390b8bd80 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.search.SearchAction; +import 
org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilterChain; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -81,7 +81,7 @@ protected Settings nodeSettings() { @Before public void setupSourceIndex() { - client().prepareIndex("source").setSource("test", "test").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("source").setSource("test", "test").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); } @Before @@ -199,7 +199,7 @@ public <Request extends ActionRequest, Response extends ActionResponse> void app ActionListener<Response> listener, ActionFilterChain<Request, Response> chain ) { - if (false == action.equals(SearchAction.NAME)) { + if (false == action.equals(TransportSearchAction.TYPE.name())) { chain.proceed(task, action, request, listener); return; } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java index 34db459539323..644787446547e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexIdTests.java @@ -59,9 +59,12 @@ private ClusterState stateWithTemplate(Settings.Builder settings) { Template template = new Template(settings.build(), null, null); if (randomBoolean()) { metadata.put("c", new ComponentTemplate(template, null, null)); - metadata.put("c", new ComposableIndexTemplate(List.of("dest_index"), null, List.of("c"), null, null, null)); + metadata.put( + "c", + ComposableIndexTemplate.builder().indexPatterns(List.of("dest_index")).componentTemplates(List.of("c")).build() + ); } else { - metadata.put("c", new ComposableIndexTemplate(List.of("dest_index"), template, null, null, null, null)); + metadata.put("c", ComposableIndexTemplate.builder().indexPatterns(List.of("dest_index")).template(template).build()); } return ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build(); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexSingleNodeTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexSingleNodeTests.java index 2df8caa4dd2ea..0804cccd8b8f2 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexSingleNodeTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexSingleNodeTests.java @@ -30,7 +30,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() { public void testDeprecatedSort() { int max = between(2, 20); for (int i = 0; i < max; i++) { - client().prepareIndex("source").setId(Integer.toString(i)).setSource("foo", i).get(); + prepareIndex("source").setId(Integer.toString(i)).setSource("foo", i).get(); } indicesAdmin().prepareRefresh("source").get(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexVersioningTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexVersioningTests.java index 96f0ff50027af..8e42b29468b5c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexVersioningTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexVersioningTests.java @@ -105,7 +105,7 @@ private BulkByScrollResponse reindexCreate() { private void setupSourceAbsent() throws Exception { indexRandom( true, - client().prepareIndex("source").setId("test").setVersionType(EXTERNAL).setVersion(SOURCE_VERSION).setSource("foo", 
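Both template hunks above (FeatureMigrationIT and ReindexIdTests) retire the positional six-argument ComposableIndexTemplate constructor in favor of a builder, so callers only name the parts they actually set. The minimal ReindexIdTests form:

    // Unset builder fields remain null, matching the old constructor's null arguments.
    ComposableIndexTemplate cit = ComposableIndexTemplate.builder()
        .indexPatterns(List.of("dest_index"))     // patterns the template applies to
        .componentTemplates(List.of("c"))         // component templates it composes
        .build();

The old constructor forced every caller to pass null for template, priority, version and metadata; the builder makes the FeatureMigrationIT call read as configuration rather than a positional puzzle.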
"source") ); assertEquals(SOURCE_VERSION, client().prepareGet("source", "test").get().getVersion()); @@ -113,10 +113,7 @@ private void setupSourceAbsent() throws Exception { private void setupDest(int version) throws Exception { setupSourceAbsent(); - indexRandom( - true, - client().prepareIndex("dest").setId("test").setVersionType(EXTERNAL).setVersion(version).setSource("foo", "dest") - ); + indexRandom(true, prepareIndex("dest").setId("test").setVersionType(EXTERNAL).setVersion(version).setSource("foo", "dest")); assertEquals(version, client().prepareGet("dest", "test").get().getVersion()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java index 5bbff9da85b20..5f1af05571585 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java @@ -81,7 +81,7 @@ private void testCase(AbstractBulkByScrollRequestBuilder request, String a List docs = new ArrayList<>(); for (int i = 0; i < numSlices * 10; i++) { - docs.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("foo", "bar")); + docs.add(prepareIndex("test").setId(Integer.toString(i)).setSource("foo", "bar")); } indexRandom(true, docs); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java index 1e338b28a5d4a..36da25685a7ba 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RetryTests.java @@ -175,13 +175,13 @@ private void testCase( final Settings indexSettings = indexSettings(1, 0).put("index.routing.allocation.include.color", "blue").build(); // Create the source index on the node with small thread pools so we can block them. - indicesAdmin().prepareCreate("source").setSettings(indexSettings).execute().actionGet(); + indicesAdmin().prepareCreate("source").setSettings(indexSettings).get(); // Not all test cases use the dest index but those that do require that it be on the node will small thread pools - indicesAdmin().prepareCreate("dest").setSettings(indexSettings).execute().actionGet(); + indicesAdmin().prepareCreate("dest").setSettings(indexSettings).get(); // Build the test data. Don't use indexRandom because that won't work consistently with such small thread pools. BulkRequestBuilder bulk = client().prepareBulk(); for (int i = 0; i < DOC_COUNT; i++) { - bulk.add(client().prepareIndex("source").setSource("foo", "bar " + i)); + bulk.add(prepareIndex("source").setSource("foo", "bar " + i)); } Retry retry = new Retry(BackoffPolicy.exponentialBackoff(), client().threadPool()); @@ -199,18 +199,21 @@ private void testCase( logger.info("Starting request"); ActionFuture responseListener = builder.execute(); + BulkByScrollResponse response = null; try { logger.info("Waiting for bulk rejections"); assertBusy(() -> assertThat(taskStatus(action).getBulkRetries(), greaterThan(0L))); bulkBlock.await(); logger.info("Waiting for the request to finish"); - BulkByScrollResponse response = responseListener.get(); + response = responseListener.get(); assertThat(response, matcher); assertThat(response.getBulkRetries(), greaterThan(0L)); } finally { // Fetch the response just in case we blew up half way through. This will make sure the failure is thrown up to the top level. 
- BulkByScrollResponse response = responseListener.get(); + if (response == null) { + response = responseListener.get(); + } assertThat(response.getSearchFailures(), empty()); assertThat(response.getBulkFailures(), empty()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryBasicTests.java index f37c9b5891416..6b1f3a21a1aad 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryBasicTests.java @@ -29,10 +29,10 @@ public class UpdateByQueryBasicTests extends ReindexTestCase { public void testBasics() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("foo", "a"), - client().prepareIndex("test").setId("2").setSource("foo", "a"), - client().prepareIndex("test").setId("3").setSource("foo", "b"), - client().prepareIndex("test").setId("4").setSource("foo", "c") + prepareIndex("test").setId("1").setSource("foo", "a"), + prepareIndex("test").setId("2").setSource("foo", "a"), + prepareIndex("test").setId("3").setSource("foo", "b"), + prepareIndex("test").setId("4").setSource("foo", "c") ); assertHitCount(prepareSearch("test").setSize(0), 4); assertEquals(1, client().prepareGet("test", "1").get().getVersion()); @@ -69,10 +69,10 @@ public void testBasics() throws Exception { public void testSlices() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("foo", "a"), - client().prepareIndex("test").setId("2").setSource("foo", "a"), - client().prepareIndex("test").setId("3").setSource("foo", "b"), - client().prepareIndex("test").setId("4").setSource("foo", "c") + prepareIndex("test").setId("1").setSource("foo", "a"), + prepareIndex("test").setId("2").setSource("foo", "a"), + prepareIndex("test").setId("3").setSource("foo", "b"), + prepareIndex("test").setId("4").setSource("foo", "c") ); assertHitCount(prepareSearch("test").setSize(0), 4); assertEquals(1, client().prepareGet("test", "1").get().getVersion()); @@ -117,7 +117,7 @@ public void testMultipleSources() throws Exception { docs.put(indexName, new ArrayList<>()); int numDocs = between(5, 15); for (int i = 0; i < numDocs; i++) { - docs.get(indexName).add(client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("foo", "a")); + docs.get(indexName).add(prepareIndex(indexName).setId(Integer.toString(i)).setSource("foo", "a")); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWhileModifyingTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWhileModifyingTests.java index 11a4476dffa83..5c2e82f6d4256 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWhileModifyingTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWhileModifyingTests.java @@ -32,7 +32,7 @@ public class UpdateByQueryWhileModifyingTests extends ReindexTestCase { public void testUpdateWhileReindexing() throws Exception { AtomicReference value = new AtomicReference<>(randomSimpleString(random())); - indexRandom(true, client().prepareIndex("test").setId("test").setSource("test", value.get())); + indexRandom(true, prepareIndex("test").setId("test").setSource("test", value.get())); AtomicReference failure = new AtomicReference<>(); AtomicBoolean keepUpdating = new AtomicBoolean(true); @@ -56,10 +56,7 @@ public void testUpdateWhileReindexing() 
throws Exception { GetResponse get = client().prepareGet("test", "test").get(); assertEquals(value.get(), get.getSource().get("test")); value.set(randomSimpleString(random())); - IndexRequestBuilder index = client().prepareIndex("test") - .setId("test") - .setSource("test", value.get()) - .setRefreshPolicy(IMMEDIATE); + IndexRequestBuilder index = prepareIndex("test").setId("test").setSource("test", value.get()).setRefreshPolicy(IMMEDIATE); /* * Update by query changes the document so concurrent * indexes might get version conflict exceptions so we just diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index 1753ba24d5c4a..c2568d9a4db2c 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -30,8 +30,8 @@ versions << [ 'stax2API': '4.2.1', 'woodstox': '6.4.0', - 'reactorNetty': '1.0.24', - 'reactorCore': '3.4.23', + 'reactorNetty': '1.0.39', + 'reactorCore': '3.4.34', 'reactiveStreams': '1.0.4', ] @@ -105,6 +105,7 @@ tasks.named("thirdPartyAudit").configure { 'io.micrometer.core.instrument.DistributionSummary', 'io.micrometer.core.instrument.DistributionSummary$Builder', 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.Meter$Type', 'io.micrometer.core.instrument.MeterRegistry', 'io.micrometer.core.instrument.Metrics', 'io.micrometer.core.instrument.Tag', @@ -117,6 +118,7 @@ tasks.named("thirdPartyAudit").configure { 'io.micrometer.core.instrument.search.Search', 'io.micrometer.core.instrument.Gauge', 'io.micrometer.core.instrument.Gauge$Builder', + 'io.micrometer.context.ContextAccessor', // from reactor-core kotlin extensions (to be deprecated from the library at some point on 3.3.x release) 'kotlin.collections.ArraysKt', diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 2cb4476f528b9..64f20453e1cee 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -105,7 +105,7 @@ protected void createRepository(String repoName) { private void ensureSasTokenPermissions() { final BlobStoreRepository repository = getRepository(); - final PlainActionFuture future = PlainActionFuture.newFuture(); + final PlainActionFuture future = new PlainActionFuture<>(); repository.threadPool().generic().execute(ActionRunnable.wrap(future, l -> { final AzureBlobStore blobStore = (AzureBlobStore) repository.blobStore(); final AzureBlobServiceClient azureBlobServiceClient = blobStore.getService().client("default", LocationMode.PRIMARY_ONLY); @@ -136,7 +136,7 @@ public void testMultiBlockUpload() throws Exception { final BlobStoreRepository repo = getRepository(); // The configured threshold for this test suite is 1mb final int blobSize = ByteSizeUnit.MB.toIntBytes(2); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); repo.threadPool().generic().execute(ActionRunnable.run(future, () -> { final BlobContainer blobContainer = repo.blobStore().blobContainer(repo.basePath().add("large_write")); blobContainer.writeBlob( diff --git 
a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index ffb5fd71f0c09..b2df41c69eda7 100644 --- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -71,7 +71,7 @@ protected SecureSettings credentials() { @Override protected void createRepository(final String repoName) { - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo") + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName) .setType("gcs") .setSettings( Settings.builder() diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 51d26a169ad0e..72df453a4e8f9 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -759,38 +759,4 @@ public int read() throws IOException { } } - private static final class PrivilegedWriteChannelStream extends OutputStream { - - private final OutputStream stream; - - PrivilegedWriteChannelStream(WritableByteChannel channel) { - stream = Channels.newOutputStream(channel); - } - - @Override - public void write(int b) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> stream.write(b)); - } - - @Override - public void write(byte[] b) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> stream.write(b)); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> stream.write(b, off, len)); - } - - @Override - public void flush() throws IOException { - SocketAccess.doPrivilegedVoidIOException(stream::flush); - } - - @Override - public void close() throws IOException { - SocketAccess.doPrivilegedVoidIOException(stream::close); - } - } - } diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index 87dda19368d5a..8b1f30a1bba61 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -1,11 +1,7 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin -import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License @@ -13,7 +9,7 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -46,6 +42,13 @@ dependencies { api 'javax.xml.bind:jaxb-api:2.2.2' testImplementation project(':test:fixtures:s3-fixture') + yamlRestTestImplementation project(":test:framework") + yamlRestTestImplementation project(':test:fixtures:s3-fixture') + yamlRestTestImplementation project(':test:fixtures:minio-fixture') + internalClusterTestImplementation project(':test:fixtures:minio-fixture') + + yamlRestTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" + internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" } restResources { @@ -83,13 +86,6 @@ tasks.named('test').configure { boolean useFixture = false -def fixtureAddress = { fixture, name, port -> - assert useFixture: 'closure should not be used without a fixture' - int ephemeralPort = project(":test:fixtures:${fixture}").postProcessFixture.ext."test.fixtures.${name}.tcp.${port}" - assert ephemeralPort > 0 - 'http://127.0.0.1:' + ephemeralPort -} - // We test against two repositories, one which uses the usual two-part "permanent" credentials and // the other which uses three-part "temporary" or "session" credentials. @@ -123,23 +119,13 @@ if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3P s3PermanentSecretKey = 's3_test_secret_key' s3PermanentBucket = 'bucket' s3PermanentBasePath = 'base_path' - - apply plugin: 'elasticsearch.test.fixtures' useFixture = true - -} else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath) { - throw new IllegalArgumentException("not all options specified to run against external S3 service as permanent credentials are present") } - if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) { s3TemporaryAccessKey = 'session_token_access_key' s3TemporarySecretKey = 'session_token_secret_key' s3TemporaryBucket = 'session_token_bucket' s3TemporaryBasePath = 'session_token_base_path' - s3TemporarySessionToken = 'session_token' - -} else if (!s3TemporaryAccessKey || !s3TemporarySecretKey || !s3TemporaryBucket || !s3TemporaryBasePath || !s3TemporarySessionToken) { - throw new IllegalArgumentException("not all options specified to run against external S3 service as temporary credentials are present") } if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { @@ -147,18 +133,17 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { s3EC2BasePath = 'ec2_base_path' s3ECSBucket = 'ecs_bucket' s3ECSBasePath = 'ecs_base_path' -} else if (!s3EC2Bucket || !s3EC2BasePath || !s3ECSBucket || !s3ECSBasePath) { - throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") } if (!s3STSBucket && !s3STSBasePath) { s3STSBucket = 'sts_bucket' s3STSBasePath = 'sts_base_path' -} else if (!s3STSBucket || !s3STSBasePath) { - throw new IllegalArgumentException("not all options specified to run STS tests are present") } tasks.named("processYamlRestTestResources").configure { + from("src/test/resources") { + include "aws-web-identity-token-file" + } Map expansions = [ 'permanent_bucket' : s3PermanentBucket, 'permanent_base_path' : s3PermanentBasePath + "_integration_tests", @@ -182,162 +167,36 @@ tasks.named("internalClusterTest").configure { } tasks.named("yamlRestTest").configure { - systemProperty 'tests.rest.blacklist', ( - 
useFixture ? - ['repository_s3/50_repository_ecs_credentials/*', - 'repository_s3/60_repository_sts_credentials/*'] - : - [ - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*', - 'repository_s3/60_repository_sts_credentials/*' - ] - ).join(",") -} - -if (useFixture) { - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture') - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-session-token') - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-ec2') + systemProperty("s3PermanentAccessKey", s3PermanentAccessKey) + systemProperty("s3PermanentSecretKey", s3PermanentSecretKey) + systemProperty("s3TemporaryAccessKey", s3TemporaryAccessKey) + systemProperty("s3TemporarySecretKey", s3TemporarySecretKey) + systemProperty("s3EC2AccessKey", s3PermanentAccessKey) - normalization { - runtimeClasspath { - // ignore generated address file for the purposes of build avoidance - ignore 's3Fixture.address' - } - } -} - -testClusters.matching { it.name == "yamlRestTest" }.configureEach { - keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - - keystore 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey - keystore 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey - keystore 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken - - if (useFixture) { - setting 's3.client.integration_test_permanent.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture', '80')}" }, IGNORE_VALUE - setting 's3.client.integration_test_temporary.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-session-token', '80')}" }, IGNORE_VALUE - setting 's3.client.integration_test_ec2.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE - - // to redirect InstanceProfileCredentialsProvider to custom auth point - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE - } else { - println "Using an external service to test the repository-s3 plugin" - } -} - -// MinIO -if (useFixture) { - testFixtures.useFixture(':test:fixtures:minio-fixture', 'minio-fixture') - - tasks.register("yamlRestTestMinio", RestIntegTestTask) { - description = "Runs REST tests using the Minio repository." 
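The blocks being deleted here are the old testclusters wiring for the Minio, ECS and STS task variants; after the refactor the single internal yamlRestTest reads its S3 coordinates from the system properties exported in the yamlRestTest block further up. On the Java side that plausibly reduces to plain property lookups (a sketch; the property names are the ones the build script sets, everything else is assumed):

    // Inside a yamlRestTest suite: pick up the credentials the Gradle task exported.
    String accessKey = Objects.requireNonNull(System.getProperty("s3PermanentAccessKey"));
    String secretKey = Objects.requireNonNull(System.getProperty("s3PermanentSecretKey"));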
- SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) - setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) - setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) - - // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 - systemProperty 'tests.rest.blacklist', [ - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*', - 'repository_s3/60_repository_sts_credentials/*' - ].join(",") - } - tasks.named("check").configure { dependsOn("yamlRestTestMinio") } - - testClusters.matching { it.name == "yamlRestTestMinio" }.configureEach { - keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - setting 's3.client.integration_test_permanent.endpoint', { "${-> fixtureAddress('minio-fixture', 'minio-fixture', '9000')}" }, IGNORE_VALUE - module tasks.named("explodedBundlePlugin") - } -} - -// ECS -if (useFixture) { - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-ecs') - tasks.register("yamlRestTestECS", RestIntegTestTask.class) { - description = "Runs tests using the ECS repository." - SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) - setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) - setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) - systemProperty 'tests.rest.blacklist', [ - 'repository_s3/10_basic/*', - 'repository_s3/20_repository_permanent_credentials/*', - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/60_repository_sts_credentials/*' - ].join(",") - } - tasks.named("check").configure { dependsOn("yamlRestTestECS") } - - testClusters.matching { it.name == "yamlRestTestECS" }.configureEach { - setting 's3.client.integration_test_ecs.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ecs', '80')}" }, IGNORE_VALUE - module tasks.named('explodedBundlePlugin') - environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ecs', '80')}/ecs_credentials_endpoint" }, IGNORE_VALUE - } -} - -// STS (Secure Token Service) -if (useFixture) { - testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-sts') - tasks.register("yamlRestTestSTS", RestIntegTestTask.class) { - description = "Runs tests with the STS (Secure Token Service)" - SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) - setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) - setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) - systemProperty 'tests.rest.blacklist', [ - 'repository_s3/10_basic/*', - 'repository_s3/20_repository_permanent_credentials/*', - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' - ].join(",") - } - tasks.named("check").configure { dependsOn("yamlRestTestSTS") } - - testClusters.matching { it.name == 
"yamlRestTestSTS" }.configureEach { - module tasks.named("explodedBundlePlugin") - - setting 's3.client.integration_test_sts.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-sts', '80')}" }, IGNORE_VALUE - systemProperty 'com.amazonaws.sdk.stsMetadataServiceEndpointOverride', - { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-sts', '80')}/assume-role-with-web-identity" }, IGNORE_VALUE - - File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file') - // The web identity token can be read only from the plugin config directory because of security restrictions - // Ideally we would create a symlink, but extraConfigFile doesn't support it - extraConfigFile 'repository-s3/aws-web-identity-token-file', awsWebIdentityTokenExternalLocation - environment 'AWS_WEB_IDENTITY_TOKEN_FILE', "$awsWebIdentityTokenExternalLocation" - - // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to S3S in the S3HttpFixtureWithSTS fixture - environment 'AWS_ROLE_ARN', 'arn:aws:iam::123456789012:role/FederatedWebIdentityRole' - environment 'AWS_ROLE_SESSION_NAME', 'sts-fixture-test' - } + // ideally we could resolve an env path in cluster config as resource similar to configuring a config file + // not sure how common this is, but it would be nice to support + File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file') + // The web identity token can be read only from the plugin config directory because of security restrictions + // Ideally we would create a symlink, but extraConfigFile doesn't support it + nonInputProperties.systemProperty("awsWebIdentityTokenExternalLocation", awsWebIdentityTokenExternalLocation.getAbsolutePath()) } // 3rd Party Tests -TaskProvider s3ThirdPartyTest = tasks.register("s3ThirdPartyTest", Test) { +tasks.register("s3ThirdPartyTest", Test) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet internalTestSourceSet = sourceSets.getByName(InternalClusterTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(internalTestSourceSet.getOutput().getClassesDirs()) setClasspath(internalTestSourceSet.getRuntimeClasspath()) include '**/S3RepositoryThirdPartyTests.class' + systemProperty("tests.use.fixture", Boolean.toString(useFixture)) + + // test container accesses ~/.testcontainers.properties read + systemProperty "tests.security.manager", "false" systemProperty 'test.s3.account', s3PermanentAccessKey systemProperty 'test.s3.key', s3PermanentSecretKey systemProperty 'test.s3.bucket', s3PermanentBucket nonInputProperties.systemProperty 'test.s3.base', s3PermanentBasePath + "_third_party_tests_" + BuildParams.testSeed - if (useFixture) { - nonInputProperties.systemProperty 'test.s3.endpoint', "${-> fixtureAddress('minio-fixture', 'minio-fixture', '9000') }" - } } -tasks.named("check").configure { dependsOn(s3ThirdPartyTest) } tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( @@ -370,3 +229,8 @@ tasks.named("thirdPartyAudit").configure { 'javax.activation.DataHandler' ) } + +tasks.named("check").configure { + dependsOn(tasks.withType(Test)) +} + diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index aee61361ebd10..7f46440647a54 100644 --- 
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
index aee61361ebd10..7f46440647a54 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
@@ -57,7 +57,7 @@
 import org.elasticsearch.telemetry.metric.MeterRegistry;
 import org.elasticsearch.test.BackgroundIndexer;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.junit.annotations.TestIssueLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.XContentFactory;
@@ -179,7 +179,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
     }

     @Override
-    @TestLogging(reason = "Enable request logging to debug #88841", value = "com.amazonaws.request:DEBUG")
+    @TestIssueLogging(issueUrl = "https://github.com/elastic/elasticsearch/issues/88841", value = "com.amazonaws.request:DEBUG")
     public void testRequestStats() throws Exception {
         super.testRequestStats();
     }
@@ -225,6 +225,7 @@ public void testAbortRequestStats() throws Exception {
         assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts);
     }

+    @TestIssueLogging(issueUrl = "https://github.com/elastic/elasticsearch/issues/101608", value = "com.amazonaws.request:DEBUG")
     public void testMetrics() throws Exception {
         // Create the repository and perform some activities
         final String repository = createRepository(randomRepositoryName());
@@ -281,8 +282,12 @@ public void testMetrics() throws Exception {
                 operation,
                 OperationPurpose.parse((String) metric.attributes().get("purpose"))
             );
-            assertThat(statsCollectors, hasKey(statsKey));
-            assertThat(metric.getLong(), equalTo(statsCollectors.get(statsKey).counter.sum()));
+            assertThat(nodeName + "/" + statsKey + " exists", statsCollectors, hasKey(statsKey));
+            assertThat(
+                nodeName + "/" + statsKey + " has correct sum",
+                metric.getLong(),
+                equalTo(statsCollectors.get(statsKey).counter.sum())
+            );
             aggregatedMetrics.compute(operation.getKey(), (k, v) -> v == null ?
metric.getLong() : v + metric.getLong());
         });
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
index b9cb2f62f8cfc..b11120e068d14 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
@@ -11,6 +11,8 @@
 import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
 import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
 import com.amazonaws.services.s3.model.MultipartUpload;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;

 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -23,6 +25,7 @@
 import org.elasticsearch.common.settings.SecureSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.core.Booleans;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.plugins.Plugin;
@@ -31,8 +34,11 @@
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.telemetry.metric.MeterRegistry;
 import org.elasticsearch.test.ClusterServiceUtils;
+import org.elasticsearch.test.fixtures.minio.MinioTestContainer;
+import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.ClassRule;

 import java.io.IOException;
 import java.util.Collection;
@@ -48,7 +54,13 @@
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.not;

+@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
+@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482
 public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
+    static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("tests.use.fixture", "true"));
+
+    @ClassRule
+    public static MinioTestContainer minio = new MinioTestContainer(USE_FIXTURE);

     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
@@ -92,7 +104,7 @@ protected void createRepository(String repoName) {
         Settings.Builder settings = Settings.builder()
             .put("bucket", System.getProperty("test.s3.bucket"))
             .put("base_path", System.getProperty("test.s3.base", "testpath"));
-        final String endpoint = System.getProperty("test.s3.endpoint");
+        final String endpoint = USE_FIXTURE ? minio.getAddress() : System.getProperty("test.s3.endpoint");
         if (endpoint != null) {
             settings.put("endpoint", endpoint);
         } else {
@@ -109,7 +121,7 @@ protected void createRepository(String repoName) {
                 settings.put("storage_class", storageClass);
             }
         }
-        AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo")
+        AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName)
             .setType("s3")
             .setSettings(settings)
             .get();
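Testcontainers keeps helper threads (its resource reaper, for instance) alive across tests, which would otherwise trip randomizedtesting's thread-leak detector; the `@ThreadLeakFilters` annotation above plugs in a filter that tells the detector to ignore them. A rough sketch of what such a filter looks like; the thread-name prefixes are assumptions for illustration, not the actual TestContainersThreadFilter source:

```java
import com.carrotsearch.randomizedtesting.ThreadFilter;

// Hypothetical leak filter: returning true "rejects" a thread from leak
// accounting, so container helper threads don't fail the suite.
public class ExampleContainersThreadFilter implements ThreadFilter {
    @Override
    public boolean reject(Thread t) {
        final String name = t.getName();
        return name.startsWith("testcontainers") || name.startsWith("ducttape");
    }
}
```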
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
index c0b64c5c672f6..87b3c17bfd91c 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
@@ -595,13 +595,17 @@ void run(BytesReference expected, BytesReference updated, ActionListener<OptionalBytesReference> listener)
             .andThen((l, currentValue) -> ActionListener.completeWith(l, () -> {
                 if (currentValue.isPresent() && currentValue.bytesReference().equals(expected)) {
+                    logger.trace("[{}] completing upload [{}]", blobKey, uploadId);
                     completeMultipartUpload(uploadId, partETag);
                 } else {
                     // Best-effort attempt to clean up after ourselves.
+                    logger.trace("[{}] aborting upload [{}]", blobKey, uploadId);
                     safeAbortMultipartUpload(uploadId);
                 }
                 return currentValue;
@@ -635,6 +641,7 @@ void run(BytesReference expected, BytesReference updated, ActionListener<OptionalBytesReference> listener) {
                 // Best-effort attempt to clean up after ourselves.
+                logger.trace(() -> Strings.format("[%s] aborting upload [%s] on exception", blobKey, uploadId), e);
                 safeAbortMultipartUpload(uploadId);
                 l.onFailure(e);
             }));
@@ -651,7 +658,10 @@ void run(BytesReference expected, BytesReference updated, ActionListener<OptionalBytesReference> listener)
             upload.getInitiated().after(expiryDate))) {
+            logger.trace("[{}] fresh preexisting uploads vs {}", blobKey, expiryDate);
             return true;
         }
@@ -674,9 +685,23 @@ private boolean hasPreexistingUploads() {
             safeAbortMultipartUpload(upload.getUploadId());
         }

+        logger.trace("[{}] stale preexisting uploads vs {}", blobKey, expiryDate);
         return false;
     }

+    private void logUploads(String description, List<MultipartUpload> uploads) {
+        if (logger.isTraceEnabled()) {
+            logger.trace(
+                "[{}] {}: [{}]",
+                blobKey,
+                description,
+                uploads.stream()
+                    .map(multipartUpload -> multipartUpload.getUploadId() + ": " + multipartUpload.getInitiated())
+                    .collect(Collectors.joining(","))
+            );
+        }
+    }
+
     private List<MultipartUpload> listMultipartUploads() {
         final var listRequest = new ListMultipartUploadsRequest(bucket);
         listRequest.setPrefix(blobKey);
@@ -776,6 +801,7 @@ private void ensureOtherUploadsComplete(
     }

     private void cancelOtherUploads(String uploadId, List<MultipartUpload> currentUploads, ActionListener<Void> listener) {
+        logger.trace("[{}] upload [{}] cancelling other uploads", blobKey, uploadId);
         final var executor = blobStore.getSnapshotExecutor();
         try (var listeners = new RefCountingListener(listener)) {
             for (final var currentUpload : currentUploads) {
@@ -826,6 +852,7 @@ public void compareAndExchangeRegister(
     ) {
         final var clientReference = blobStore.clientReference();
         ActionListener.run(ActionListener.releaseAfter(listener.delegateResponse((delegate, e) -> {
+            logger.trace(() -> Strings.format("[%s]: compareAndExchangeRegister failed", key), e);
             if (e instanceof AmazonS3Exception amazonS3Exception && amazonS3Exception.getStatusCode() == 404) {
                 // an uncaught 404 means that our multipart upload was aborted by a concurrent operation
before we could complete it delegate.onResponse(OptionalBytesReference.MISSING); @@ -853,6 +880,7 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener Strings.format("[%s]: getRegister failed", key), e); if (e.getStatusCode() == 404) { return OptionalBytesReference.EMPTY; } else { diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index 7d1b495a0f008..ab322786fcd43 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -96,6 +96,13 @@ final class S3ClientSettings { key -> Setting.intSetting(key, 80, 0, 1 << 16, Property.NodeScope) ); + /** The proxy scheme for connecting to S3 through a proxy. */ + static final Setting.AffixSetting PROXY_SCHEME_SETTING = Setting.affixKeySetting( + PREFIX, + "proxy.scheme", + key -> new Setting<>(key, "http", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope) + ); + /** The username of a proxy to connect to s3 through. */ static final Setting.AffixSetting PROXY_USERNAME_SETTING = Setting.affixKeySetting( PREFIX, @@ -174,6 +181,9 @@ final class S3ClientSettings { /** The port number the proxy host should be connected on. */ final int proxyPort; + /** The proxy scheme to use for connecting to s3 through a proxy. */ + final Protocol proxyScheme; + // these should be "secure" yet the api for the s3 client only takes String, so storing them // as SecureString here won't really help with anything /** An optional username for the proxy host, for basic authentication. */ @@ -209,6 +219,7 @@ private S3ClientSettings( Protocol protocol, String proxyHost, int proxyPort, + Protocol proxyScheme, String proxyUsername, String proxyPassword, int readTimeoutMillis, @@ -224,6 +235,7 @@ private S3ClientSettings( this.protocol = protocol; this.proxyHost = proxyHost; this.proxyPort = proxyPort; + this.proxyScheme = proxyScheme; this.proxyUsername = proxyUsername; this.proxyPassword = proxyPassword; this.readTimeoutMillis = readTimeoutMillis; @@ -252,6 +264,7 @@ S3ClientSettings refine(Settings repositorySettings) { final Protocol newProtocol = getRepoSettingOrDefault(PROTOCOL_SETTING, normalizedSettings, protocol); final String newProxyHost = getRepoSettingOrDefault(PROXY_HOST_SETTING, normalizedSettings, proxyHost); final int newProxyPort = getRepoSettingOrDefault(PROXY_PORT_SETTING, normalizedSettings, proxyPort); + final Protocol newProxyScheme = getRepoSettingOrDefault(PROXY_SCHEME_SETTING, normalizedSettings, proxyScheme); final int newReadTimeoutMillis = Math.toIntExact( getRepoSettingOrDefault(READ_TIMEOUT_SETTING, normalizedSettings, TimeValue.timeValueMillis(readTimeoutMillis)).millis() ); @@ -275,6 +288,7 @@ S3ClientSettings refine(Settings repositorySettings) { && protocol == newProtocol && Objects.equals(proxyHost, newProxyHost) && proxyPort == newProxyPort + && proxyScheme == newProxyScheme && newReadTimeoutMillis == readTimeoutMillis && maxRetries == newMaxRetries && newThrottleRetries == throttleRetries @@ -291,6 +305,7 @@ S3ClientSettings refine(Settings repositorySettings) { newProtocol, newProxyHost, newProxyPort, + newProxyScheme, proxyUsername, proxyPassword, newReadTimeoutMillis, @@ -398,6 +413,7 @@ static S3ClientSettings getClientSettings(final Settings settings, final String getConfigValue(settings, clientName, 
PROTOCOL_SETTING),
             getConfigValue(settings, clientName, PROXY_HOST_SETTING),
             getConfigValue(settings, clientName, PROXY_PORT_SETTING),
+            getConfigValue(settings, clientName, PROXY_SCHEME_SETTING),
             proxyUsername.toString(),
             proxyPassword.toString(),
             Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()),
@@ -428,6 +444,7 @@ public boolean equals(final Object o) {
             && Objects.equals(endpoint, that.endpoint)
             && protocol == that.protocol
             && Objects.equals(proxyHost, that.proxyHost)
+            && proxyScheme == that.proxyScheme
             && Objects.equals(proxyUsername, that.proxyUsername)
             && Objects.equals(proxyPassword, that.proxyPassword)
             && Objects.equals(disableChunkedEncoding, that.disableChunkedEncoding)
@@ -443,6 +460,7 @@ public int hashCode() {
             protocol,
             proxyHost,
             proxyPort,
+            proxyScheme,
             proxyUsername,
             proxyPassword,
             readTimeoutMillis,
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java
index 97c065e771ffd..f85a66c5eb367 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.repositories.s3;

+import com.amazonaws.regions.RegionUtils;
 import com.amazonaws.util.json.Jackson;

 import org.apache.lucene.util.SetOnce;
@@ -49,6 +50,8 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin {
             // ClientConfiguration clinit has some classloader problems
             // TODO: fix that
             Class.forName("com.amazonaws.ClientConfiguration");
+            // Pre-load region metadata to avoid looking it up dynamically without privileges enabled
+            RegionUtils.initialize();
         } catch (final ClassNotFoundException e) {
             throw new RuntimeException(e);
         }
@@ -116,6 +119,7 @@ public List<Setting<?>> getSettings() {
             S3ClientSettings.PROTOCOL_SETTING,
             S3ClientSettings.PROXY_HOST_SETTING,
             S3ClientSettings.PROXY_PORT_SETTING,
+            S3ClientSettings.PROXY_SCHEME_SETTING,
             S3ClientSettings.PROXY_USERNAME_SETTING,
             S3ClientSettings.PROXY_PASSWORD_SETTING,
             S3ClientSettings.READ_TIMEOUT_SETTING,
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java
index ae2441c2e705d..b7c37c6d95fde 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java
@@ -24,6 +24,7 @@
  * This class emits AWS S3 metrics as logs until we have a proper APM integration
  */
 public class S3RequestRetryStats {
+    public static final String MESSAGE_FIELD = "message";

     private static final Logger logger = LogManager.getLogger(S3RequestRetryStats.class);

@@ -65,7 +66,8 @@ private static long getCounter(TimingInfo info, AWSRequestMetrics.Field field) {

     public void emitMetrics() {
         if (logger.isDebugEnabled()) {
-            var metrics = Maps.newMapWithExpectedSize(3);
+            var metrics = Maps.newMapWithExpectedSize(4);
+            metrics.put(MESSAGE_FIELD, "S3 Request Retry Stats");
             metrics.put("elasticsearch.metrics.s3.requests", requests.get());
             metrics.put("elasticsearch.metrics.s3.exceptions", exceptions.get());
             metrics.put("elasticsearch.metrics.s3.throttles", throttles.get());
diff --git
a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 25bba12db6952..195a18891ebd0 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -135,7 +135,7 @@ public AmazonS3Reference client(RepositoryMetadata repositoryMetadata) { return existing; } final AmazonS3Reference clientReference = new AmazonS3Reference(buildClient(clientSettings)); - clientReference.incRef(); + clientReference.mustIncRef(); clientsCache = Maps.copyMapWithAddedEntry(clientsCache, clientSettings, clientReference); return clientReference; } @@ -221,6 +221,7 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { // TODO: remove this leniency, these settings should exist together and be validated clientConfiguration.setProxyHost(clientSettings.proxyHost); clientConfiguration.setProxyPort(clientSettings.proxyPort); + clientConfiguration.setProxyProtocol(clientSettings.proxyScheme); clientConfiguration.setProxyUsername(clientSettings.proxyUsername); clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } @@ -370,7 +371,7 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials // https://github.com/aws/amazon-eks-pod-identity-webhook/pull/41 stsRegion = systemEnvironment.getEnv(SDKGlobalConfiguration.AWS_REGION_ENV_VAR); if (stsRegion != null) { - stsClientBuilder.withRegion(stsRegion); + SocketAccess.doPrivilegedVoid(() -> stsClientBuilder.withRegion(stsRegion)); } else { LOGGER.warn("Unable to use regional STS endpoints because the AWS_REGION environment variable is not set"); } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index 8bff849ca26c2..c48e0dc337d30 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -37,6 +37,7 @@ public void testThereIsADefaultClientByDefault() { assertThat(defaultSettings.protocol, is(Protocol.HTTPS)); assertThat(defaultSettings.proxyHost, is(emptyString())); assertThat(defaultSettings.proxyPort, is(80)); + assertThat(defaultSettings.proxyScheme, is(Protocol.HTTP)); assertThat(defaultSettings.proxyUsername, is(emptyString())); assertThat(defaultSettings.proxyPassword, is(emptyString())); assertThat(defaultSettings.readTimeoutMillis, is(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT)); diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3ClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3ClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..ecf6709a2fcef --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3ClientYamlTestSuiteIT.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public abstract class AbstractRepositoryS3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + public AbstractRepositoryS3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java index 1cbdf357d821b..2f2f42974f131 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java @@ -8,20 +8,65 @@ package org.elasticsearch.repositories.s3; +import fixture.s3.S3HttpFixture; +import fixture.s3.S3HttpFixtureWithEC2; +import fixture.s3.S3HttpFixtureWithSessionToken; + import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3ClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + + public static final S3HttpFixture s3Fixture = new S3HttpFixture(); + public static final S3HttpFixtureWithSessionToken s3HttpFixtureWithSessionToken = new S3HttpFixtureWithSessionToken(); + public static final S3HttpFixtureWithEC2 s3Ec2 = new S3HttpFixtureWithEC2(); -public class RepositoryS3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + private static final String s3TemporarySessionToken = "session_token"; + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey")) + .keystore("s3.client.integration_test_permanent.secret_key", System.getProperty("s3PermanentSecretKey")) + .keystore("s3.client.integration_test_temporary.access_key", System.getProperty("s3TemporaryAccessKey")) + .keystore("s3.client.integration_test_temporary.secret_key", System.getProperty("s3TemporarySecretKey")) + 
.keystore("s3.client.integration_test_temporary.session_token", s3TemporarySessionToken) + .setting("s3.client.integration_test_permanent.endpoint", s3Fixture::getAddress) + .setting("s3.client.integration_test_temporary.endpoint", s3HttpFixtureWithSessionToken::getAddress) + .setting("s3.client.integration_test_ec2.endpoint", s3Ec2::getAddress) + .systemProperty("com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", s3Ec2::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Ec2).around(s3HttpFixtureWithSessionToken).around(cluster); + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters( + new String[] { + "repository_s3/10_basic", + "repository_s3/20_repository_permanent_credentials", + "repository_s3/30_repository_temporary_credentials", + "repository_s3/40_repository_ec2_credentials" } + ); + } public RepositoryS3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); } } diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..e9bc9d0537cbb --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.s3; + +import fixture.s3.S3HttpFixtureWithECS; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +public class RepositoryS3EcsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + private static final S3HttpFixtureWithECS s3Ecs = new S3HttpFixtureWithECS(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client.integration_test_ecs.endpoint", s3Ecs::getAddress) + .environment("AWS_CONTAINER_CREDENTIALS_FULL_URI", () -> (s3Ecs.getAddress() + "/ecs_credentials_endpoint")) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Ecs).around(cluster); + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(new String[] { "repository_s3/50_repository_ecs_credentials" }); + } + + public RepositoryS3EcsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..41f9983ef26e6 --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.minio.MinioTestContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +public class RepositoryS3MinioClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + + public static MinioTestContainer minio = new MinioTestContainer(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey")) + .keystore("s3.client.integration_test_permanent.secret_key", System.getProperty("s3PermanentSecretKey")) + .setting("s3.client.integration_test_permanent.endpoint", () -> minio.getAddress()) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(minio).around(cluster); + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(new String[] { "repository_s3/10_basic", "repository_s3/20_repository_permanent_credentials" }); + } + + public RepositoryS3MinioClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..b0a7f84c03c85 --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.util.resource.Resource;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.junit.ClassRule;
+
+public class RepositoryS3RegionalStsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT {
+    @ClassRule
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .module("repository-s3")
+        .configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file"))
+        .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation"))
+        // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to STS in the
+        // S3HttpFixtureWithSTS fixture
+        .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole")
+        .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test")
+        .environment("AWS_STS_REGIONAL_ENDPOINTS", "regional")
+        .environment("AWS_REGION", "ap-southeast-2")
+        .build();
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return createParameters(new String[] { "repository_s3/10_basic" });
+    }
+
+    public RepositoryS3RegionalStsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
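Most of these new suites hang their fixtures and the cluster off a single `RuleChain` rather than making the cluster itself the `@ClassRule`: `outerRule` is started first and torn down last, so a fixture is guaranteed to be listening before the cluster resolves its endpoint supplier, and to outlive the cluster on shutdown. A minimal sketch of that ordering with stand-in rules (the `named` helper is illustrative only):

```java
import org.junit.ClassRule;
import org.junit.rules.ExternalResource;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;

public class RuleChainOrderingSketch {

    // Stand-in for an S3 fixture or an ElasticsearchCluster rule.
    static ExternalResource named(String name) {
        return new ExternalResource() {
            @Override
            protected void before() {
                System.out.println(name + " starting");
            }

            @Override
            protected void after() {
                System.out.println(name + " stopping");
            }
        };
    }

    // Prints "fixture starting", "cluster starting" before the tests run and
    // "cluster stopping", "fixture stopping" after they finish.
    @ClassRule
    public static TestRule ruleChain = RuleChain.outerRule(named("fixture")).around(named("cluster"));
}
```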
diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java
new file mode 100644
index 0000000000000..eb105e02353b6
--- /dev/null
+++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import fixture.s3.S3HttpFixture;
+import fixture.s3.S3HttpFixtureWithSTS;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.util.resource.Resource;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+public class RepositoryS3StsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT {
+
+    public static final S3HttpFixture s3Fixture = new S3HttpFixture();
+    private static final S3HttpFixtureWithSTS s3Sts = new S3HttpFixtureWithSTS();
+
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .module("repository-s3")
+        .setting("s3.client.integration_test_sts.endpoint", s3Sts::getAddress)
+        .systemProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", () -> s3Sts.getAddress() + "/assume-role-with-web-identity")
+        .configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file"))
+        .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation"))
+        // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to STS in the
+        // S3HttpFixtureWithSTS fixture
+        .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole")
+        .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test")
+        .build();
+
+    @ClassRule
+    public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Sts).around(cluster);
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return createParameters(new String[] { "repository_s3/60_repository_sts_credentials" });
+    }
+
+    public RepositoryS3StsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java
index 9f807cc9f98f1..a47b9d8b622b5 100644
--- a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java
+++ b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java
@@ -107,8 +107,7 @@ public void testUrlRepository() throws Exception {
             .prepareRestoreSnapshot("url-repo", "test-snap")
             .setWaitForCompletion(true)
             .setIndices("test-idx")
-            .execute()
-            .actionGet();
+            .get();

         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
         assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
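The ICU mapper test changes below replace direct `SearchResponse` handling with the `assertResponse` helper, which hands the response to the assertion block and then releases it, so a failed assertion can no longer leak a ref-counted response. Roughly, the helper behaves like this simplified sketch (the real ElasticsearchAssertions signature also accepts checked consumers):

```java
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.search.SearchResponse;

import java.util.function.Consumer;

final class AssertResponseSketch {
    // Simplified shape of the assertResponse pattern: resolve the future,
    // run the assertions, and always release the ref-counted response.
    static void assertResponseSketch(ActionFuture<SearchResponse> future, Consumer<SearchResponse> assertions) {
        SearchResponse response = future.actionGet();
        try {
            assertions.accept(response);
        } finally {
            response.decRef();
        }
    }
}
```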
diff --git a/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java b/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java
index e6f91efad0162..955bcaf8f0352 100644
--- a/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java
+++ b/plugins/analysis-icu/src/internalClusterTest/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java
@@ -12,7 +12,6 @@
 import com.ibm.icu.util.ULocale;

 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin;
 import org.elasticsearch.plugins.Plugin;
@@ -31,6 +30,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;

 public class ICUCollationKeywordFieldMapperIT extends ESIntegTestCase {
@@ -69,8 +69,8 @@ public void testBasicUsage() throws Exception {
         // both values should collate to same value
         indexRandom(
             true,
-            client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
-            client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
+            prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+            prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
         );

         // searching for either of the terms should return both results since they collate to the same value
@@ -82,10 +82,11 @@ public void testBasicUsage() throws Exception {
                 .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
         );

-        SearchResponse response = client().search(request).actionGet();
-        assertNoFailures(response);
-        assertHitCount(response, 2L);
-        assertOrderedSearchHits(response, "2", "1");
+        assertResponse(client().search(request), response -> {
+            assertNoFailures(response);
+            assertHitCount(response, 2L);
+            assertOrderedSearchHits(response, "2", "1");
+        });
     }

     public void testMultipleValues() throws Exception {
@@ -110,10 +111,9 @@ public void testMultipleValues() throws Exception {
         // everything should be indexed fine, no exceptions
         indexRandom(
             true,
-            client().prepareIndex(index)
-                .setId("1")
+            prepareIndex(index).setId("1")
                 .setSource("{\"id\":\"1\", \"collate\":[\"" + equivalent[0] + "\", \"" + equivalent[1] + "\"]}", XContentType.JSON),
-            client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[2] + "\"}", XContentType.JSON)
+            prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[2] + "\"}", XContentType.JSON)
         );

         // using sort mode = max, values B and C will be used for the sort
@@ -126,10 +126,11 @@ public void testMultipleValues() throws Exception {
                 .sort("id", SortOrder.DESC) // will be ignored
         );

-        SearchResponse response = client().search(request).actionGet();
-        assertNoFailures(response);
-        assertHitCount(response, 2L);
-        assertOrderedSearchHits(response, "1", "2");
+        assertResponse(client().search(request), response -> {
+            assertNoFailures(response);
+            assertHitCount(response, 2L);
+            assertOrderedSearchHits(response, "1", "2");
+        });

         // same thing, using different sort mode that will use a for both docs
         request = new SearchRequest().indices(index)
@@ -141,10 +142,11 @@ public void testMultipleValues()
throws Exception { .sort("id", SortOrder.DESC) // will NOT be ignored and will determine order ); - response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -173,8 +175,8 @@ public void testNormalization() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); // searching for either of the terms should return both results since they collate to the same value @@ -186,10 +188,11 @@ public void testNormalization() throws Exception { .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -218,8 +221,8 @@ public void testSecondaryStrength() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -230,10 +233,11 @@ public void testSecondaryStrength() throws Exception { .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -263,8 +267,8 @@ public void testIgnorePunctuation() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -275,10 +279,11 @@ public void testIgnorePunctuation() throws Exception 
{ .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -308,9 +313,9 @@ public void testIgnoreWhitespace() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"foo bar\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"foobar\"}", XContentType.JSON), - client().prepareIndex(index).setId("3").setSource("{\"id\":\"3\",\"collate\":\"foo-bar\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"foo bar\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"foobar\"}", XContentType.JSON), + prepareIndex(index).setId("3").setSource("{\"id\":\"3\",\"collate\":\"foo-bar\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -321,10 +326,11 @@ public void testIgnoreWhitespace() throws Exception { .sort("id", SortOrder.ASC) ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 3L); - assertOrderedSearchHits(response, "3", "1", "2"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 3L); + assertOrderedSearchHits(response, "3", "1", "2"); + }); } /* @@ -347,17 +353,18 @@ public void testNumerics() throws Exception { assertAcked(indicesAdmin().prepareCreate(index).setMapping(builder)); - indexRandom(true, client().prepareIndex(index).setId("1").setSource(""" - {"collate":"foobar-10"}""", XContentType.JSON), client().prepareIndex(index).setId("2").setSource(""" + indexRandom(true, prepareIndex(index).setId("1").setSource(""" + {"collate":"foobar-10"}""", XContentType.JSON), prepareIndex(index).setId("2").setSource(""" {"collate":"foobar-9"}""", XContentType.JSON)); SearchRequest request = new SearchRequest().indices(index) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC)); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -384,19 +391,20 @@ public void testIgnoreAccentsButNotCase() throws Exception { assertAcked(indicesAdmin().prepareCreate(index).setMapping(builder)); - indexRandom(true, client().prepareIndex(index).setId("1").setSource(""" - {"id":"1","collate":"résumé"}""", XContentType.JSON), client().prepareIndex(index).setId("2").setSource(""" - {"id":"2","collate":"Resume"}""", XContentType.JSON), client().prepareIndex(index).setId("3").setSource(""" - {"id":"3","collate":"resume"}""", XContentType.JSON), client().prepareIndex(index).setId("4").setSource(""" + indexRandom(true, prepareIndex(index).setId("1").setSource(""" + {"id":"1","collate":"résumé"}""", XContentType.JSON), prepareIndex(index).setId("2").setSource(""" + {"id":"2","collate":"Resume"}""", XContentType.JSON), 
prepareIndex(index).setId("3").setSource(""" + {"id":"3","collate":"resume"}""", XContentType.JSON), prepareIndex(index).setId("4").setSource(""" {"id":"4","collate":"Résumé"}""", XContentType.JSON)); SearchRequest request = new SearchRequest().indices(index) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC).sort("id", SortOrder.DESC)); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 4L); - assertOrderedSearchHits(response, "3", "1", "4", "2"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 4L); + assertOrderedSearchHits(response, "3", "1", "4", "2"); + }); } /* @@ -422,17 +430,18 @@ public void testUpperCaseFirst() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"collate\":\"resume\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"collate\":\"Resume\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"collate\":\"resume\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"collate\":\"Resume\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) .source(new SearchSourceBuilder().fetchSource(false).sort("collate", SortOrder.ASC)); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } /* @@ -470,8 +479,8 @@ public void testCustomRules() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), + prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -482,9 +491,10 @@ public void testCustomRules() throws Exception { .sort("id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); - SearchResponse response = client().search(request).actionGet(); - assertNoFailures(response); - assertHitCount(response, 2L); - assertOrderedSearchHits(response, "2", "1"); + assertResponse(client().search(request), response -> { + assertNoFailures(response); + assertHitCount(response, 2L); + assertOrderedSearchHits(response, "2", "1"); + }); } } diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java index 5d0ec97499505..dbcf64bef33e9 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java @@ -86,7 +86,7 @@ protected void 
registerAzureNode(final String nodeName) { } protected void assertNumberOfNodes(int expected) { - NodesInfoResponse nodeInfos = clusterAdmin().prepareNodesInfo().clear().execute().actionGet(); + NodesInfoResponse nodeInfos = clusterAdmin().prepareNodesInfo().clear().get(); assertNotNull(nodeInfos); assertNotNull(nodeInfos.getNodes()); assertEquals(expected, nodeInfos.getNodes().size()); diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 5107bb9051bd1..b57d6bce26633 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -72,7 +72,10 @@ tasks.register("writeTestJavaPolicy") { "permission org.bouncycastle.crypto.CryptoServicesPermission \"exportSecretKey\";", "permission org.bouncycastle.crypto.CryptoServicesPermission \"exportPrivateKey\";", "permission java.io.FilePermission \"\${javax.net.ssl.trustStore}\", \"read\";", - " permission java.util.PropertyPermission \"com.amazonaws.sdk.ec2MetadataServiceEndpointOverride\", \"write\";", + "permission java.util.PropertyPermission \"com.amazonaws.sdk.ec2MetadataServiceEndpointOverride\", \"write\";", + "permission java.security.SecurityPermission \"getProperty.jdk.tls.disabledAlgorithms\";", + "permission java.security.SecurityPermission \"getProperty.jdk.certpath.disabledAlgorithms\";", + "permission java.security.SecurityPermission \"getProperty.keystore.type.compat\";", "};" ].join("\n") ) diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index ff32759508038..94aa05288a55c 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -62,6 +62,7 @@ static ClientConfiguration buildConfiguration(Ec2ClientSettings clientSettings) // TODO: remove this leniency, these settings should exist together and be validated clientConfiguration.setProxyHost(clientSettings.proxyHost); clientConfiguration.setProxyPort(clientSettings.proxyPort); + clientConfiguration.setProxyProtocol(clientSettings.proxyScheme); clientConfiguration.setProxyUsername(clientSettings.proxyUsername); clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java index 043114b26c81b..3a1cd1f1d33e6 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java @@ -48,6 +48,14 @@ final class Ec2ClientSettings { /** The port of a proxy to connect to ec2 through. */ static final Setting PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1 << 16, Property.NodeScope); + /** The scheme to use for the proxy connection to ec2. Defaults to "http". */ + static final Setting PROXY_SCHEME_SETTING = new Setting<>( + "discovery.ec2.proxy.scheme", + "http", + s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), + Property.NodeScope + ); + /** An override for the ec2 endpoint to connect to. 
*/ static final Setting ENDPOINT_SETTING = new Setting<>( "discovery.ec2.endpoint", @@ -56,7 +64,7 @@ final class Ec2ClientSettings { Property.NodeScope ); - /** The protocol to use to connect to to ec2. */ + /** The protocol to use to connect to ec2. */ static final Setting PROTOCOL_SETTING = new Setting<>( "discovery.ec2.protocol", "https", @@ -99,6 +107,9 @@ final class Ec2ClientSettings { /** The port number the proxy host should be connected on. */ final int proxyPort; + /** The scheme to use for the proxy connection to ec2 */ + final Protocol proxyScheme; + // these should be "secure" yet the api for the ec2 client only takes String, so // storing them // as SecureString here won't really help with anything @@ -117,6 +128,7 @@ protected Ec2ClientSettings( Protocol protocol, String proxyHost, int proxyPort, + Protocol proxyScheme, String proxyUsername, String proxyPassword, int readTimeoutMillis @@ -126,6 +138,7 @@ protected Ec2ClientSettings( this.protocol = protocol; this.proxyHost = proxyHost; this.proxyPort = proxyPort; + this.proxyScheme = proxyScheme; this.proxyUsername = proxyUsername; this.proxyPassword = proxyPassword; this.readTimeoutMillis = readTimeoutMillis; @@ -196,6 +209,7 @@ static Ec2ClientSettings getClientSettings(Settings settings) { PROTOCOL_SETTING.get(settings), PROXY_HOST_SETTING.get(settings), PROXY_PORT_SETTING.get(settings), + PROXY_SCHEME_SETTING.get(settings), proxyUsername.toString(), proxyPassword.toString(), (int) READ_TIMEOUT_SETTING.get(settings).millis() diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index 08cf7ea559bf7..69447e800d4ac 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -104,6 +104,7 @@ public List> getSettings() { Ec2ClientSettings.PROTOCOL_SETTING, Ec2ClientSettings.PROXY_HOST_SETTING, Ec2ClientSettings.PROXY_PORT_SETTING, + Ec2ClientSettings.PROXY_SCHEME_SETTING, Ec2ClientSettings.PROXY_USERNAME_SETTING, Ec2ClientSettings.PROXY_PASSWORD_SETTING, Ec2ClientSettings.READ_TIMEOUT_SETTING, diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java index bb73b951ca4f7..aa4a5bd6e54ea 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -119,7 +119,16 @@ public void testRejectionOfLoneSessionToken() { } public void testAWSDefaultConfiguration() { - launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, ClientConfiguration.DEFAULT_SOCKET_TIMEOUT); + launchAWSConfigurationTest( + Settings.EMPTY, + Protocol.HTTPS, + null, + -1, + Protocol.HTTP, + null, + null, + ClientConfiguration.DEFAULT_SOCKET_TIMEOUT + ); } public void testAWSConfigurationWithAwsSettings() { @@ -130,10 +139,20 @@ public void testAWSConfigurationWithAwsSettings() { .put("discovery.ec2.protocol", "http") .put("discovery.ec2.proxy.host", "aws_proxy_host") .put("discovery.ec2.proxy.port", 8080) + .put("discovery.ec2.proxy.scheme", "http") .put("discovery.ec2.read_timeout", "10s") .setSecureSettings(secureSettings) .build(); - 
launchAWSConfigurationTest(settings, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", "aws_proxy_password", 10000); + launchAWSConfigurationTest( + settings, + Protocol.HTTP, + "aws_proxy_host", + 8080, + Protocol.HTTP, + "aws_proxy_username", + "aws_proxy_password", + 10000 + ); } protected void launchAWSConfigurationTest( @@ -141,6 +160,7 @@ protected void launchAWSConfigurationTest( Protocol expectedProtocol, String expectedProxyHost, int expectedProxyPort, + Protocol expectedProxyScheme, String expectedProxyUsername, String expectedProxyPassword, int expectedReadTimeout @@ -151,6 +171,7 @@ protected void launchAWSConfigurationTest( assertThat(configuration.getProtocol(), is(expectedProtocol)); assertThat(configuration.getProxyHost(), is(expectedProxyHost)); assertThat(configuration.getProxyPort(), is(expectedProxyPort)); + assertThat(configuration.getProxyProtocol(), is(expectedProxyScheme)); assertThat(configuration.getProxyUsername(), is(expectedProxyUsername)); assertThat(configuration.getProxyPassword(), is(expectedProxyPassword)); assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout)); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 93ff42fb50218..b9bea564e2720 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; @@ -160,6 +161,7 @@ public void testClientSettingsReInit() throws IOException { final Settings settings1 = Settings.builder() .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_1") .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 881) + .put(Ec2ClientSettings.PROXY_SCHEME_SETTING.getKey(), "http") .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_1") .setSecureSettings(mockSecure1) .build(); @@ -175,6 +177,7 @@ public void testClientSettingsReInit() throws IOException { final Settings settings2 = Settings.builder() .put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_2") .put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 882) + .put(Ec2ClientSettings.PROXY_SCHEME_SETTING.getKey(), "http") .put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_2") .setSecureSettings(mockSecure2) .build(); @@ -194,6 +197,7 @@ public void testClientSettingsReInit() throws IOException { assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyProtocol(), is(Protocol.HTTP)); assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); } // reload secure settings2 @@ -211,6 +215,7 @@ public void testClientSettingsReInit() throws IOException { assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); assertThat(((AmazonEC2Mock) 
clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyProtocol(), is(Protocol.HTTP)); assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); } } @@ -228,6 +233,7 @@ public void testClientSettingsReInit() throws IOException { assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_2")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_2")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(882)); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyProtocol(), is(Protocol.HTTP)); assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_2")); } } diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java index 45c2a9208b8d6..9b7c6afbb9f10 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java @@ -56,7 +56,7 @@ protected Analyzer wrapAnalyzer(Analyzer analyzer, Integer maxAnalyzedOffset) { } @Override - protected PassageFormatter getPassageFormatter(HitContext hitContext, SearchHighlightContext.Field field, Encoder encoder) { + protected PassageFormatter getPassageFormatter(SearchHighlightContext.Field field, Encoder encoder) { return new AnnotatedPassageFormatter(encoder); } diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index e92c7ca4bdebb..ee16153a98de1 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.plugin.mapper.MapperSizePlugin; import org.elasticsearch.plugins.Plugin; @@ -24,6 +23,7 @@ import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; @@ -100,7 +100,7 @@ private void assertSizeMappingEnabled(String index, boolean enabled) throws IOEx public void testBasic() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=true")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + indexRandom(true, 
prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); GetResponse getResponse = client().prepareGet("test", "1").setStoredFields("_size").get(); assertNotNull(getResponse.getField("_size")); assertEquals(source.length(), (int) getResponse.getField("_size").getValue()); @@ -109,44 +109,65 @@ public void testBasic() throws Exception { public void testGetWithFields() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=true")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - SearchResponse searchResponse = prepareSearch("test").addFetchField("_size").get(); - assertEquals(source.length(), ((Long) searchResponse.getHits().getHits()[0].getFields().get("_size").getValue()).intValue()); + indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + assertResponse( + prepareSearch("test").addFetchField("_size"), + response -> assertEquals( + source.length(), + ((Long) response.getHits().getHits()[0].getFields().get("_size").getValue()).intValue() + ) + ); // this should not work when requesting fields via wildcard expression - searchResponse = prepareSearch("test").addFetchField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addFetchField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); // This should STILL work - searchResponse = prepareSearch("test").addStoredField("*").get(); - assertNotNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addStoredField("*"), + response -> assertNotNull(response.getHits().getHits()[0].getFields().get("_size")) + ); } public void testWildCardWithFieldsWhenDisabled() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=false")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - SearchResponse searchResponse = prepareSearch("test").addFetchField("_size").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + assertResponse( + prepareSearch("test").addFetchField("_size"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); - searchResponse = prepareSearch("test").addFetchField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addFetchField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); - searchResponse = prepareSearch("test").addStoredField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addStoredField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); } public void testWildCardWithFieldsWhenNotProvided() throws Exception { assertAcked(prepareCreate("test")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - SearchResponse searchResponse = 
prepareSearch("test").addFetchField("_size").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + assertResponse( + prepareSearch("test").addFetchField("_size"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); - searchResponse = prepareSearch("test").addFetchField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addFetchField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); - searchResponse = prepareSearch("test").addStoredField("*").get(); - assertNull(searchResponse.getHits().getHits()[0].getFields().get("_size")); + assertResponse( + prepareSearch("test").addStoredField("*"), + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + ); } } diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index b76d2e27be66a..16e8d2610f3fb 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -61,9 +61,9 @@ public void testSimpleWorkflow() { logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); - client().prepareIndex("test-idx-2").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); - client().prepareIndex("test-idx-3").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-2").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + prepareIndex("test-idx-3").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); } client().admin().indices().prepareRefresh().get(); assertThat(count(client, "test-idx-1"), equalTo(100L)); @@ -111,8 +111,7 @@ public void testSimpleWorkflow() { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -129,8 +128,7 @@ public void testSimpleWorkflow() { .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2") - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); assertThat(count(client, "test-idx-1"), equalTo(100L)); diff --git a/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/AbstractAzureFsTestCase.java b/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/AbstractAzureFsTestCase.java index 4d1f6426821c4..4a35779a42166 100644 --- a/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/AbstractAzureFsTestCase.java +++ b/plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/AbstractAzureFsTestCase.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.store.smb; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.plugin.store.smb.SMBStorePlugin; import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -16,7 +15,7 @@ import java.util.Arrays; import java.util.Collection; -import static org.hamcrest.Matchers.is; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; public abstract class AbstractAzureFsTestCase extends ESIntegTestCase { @Override @@ -32,7 +31,6 @@ public void testAzureFs() { indexDoc("test", "" + i, "foo", "bar"); } refresh(); - SearchResponse response = prepareSearch("test").get(); - assertThat(response.getHits().getTotalHits().value, is(nbDocs)); + assertHitCount(prepareSearch("test"), nbDocs); } } diff --git a/qa/ccs-common-rest/build.gradle b/qa/ccs-common-rest/build.gradle index eb4c40044f14b..8ad306144bd98 100644 --- a/qa/ccs-common-rest/build.gradle +++ b/qa/ccs-common-rest/build.gradle @@ -41,6 +41,7 @@ tasks.named("yamlRestTest") { 'search.aggregation/50_filter/Standard queries get cached', 'search.aggregation/50_filter/Terms lookup gets cached', // terms lookup by "index" doesn't seem to work correctly 'search.aggregation/70_adjacency_matrix/Terms lookup', // terms lookup by "index" doesn't seem to work correctly + 'search/350_point_in_time/point-in-time with index filter' ].join(',') } diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index 5ad525b472b12..7c1514d2d1a6a 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -389,10 +389,7 @@ private boolean shouldReplaceIndexWithRemote(String apiName) { if (apiName.equals("search") || apiName.equals("msearch") || apiName.equals("async_search.submit")) { final String testCandidateTestPath = testCandidate.getTestPath(); - if (testCandidateTestPath.equals("search/350_point_in_time/basic") - || testCandidateTestPath.equals("search/350_point_in_time/point-in-time with slicing") - || testCandidateTestPath.equals("search/350_point_in_time/msearch") - || testCandidateTestPath.equals("search/350_point_in_time/wildcard") + if (testCandidateTestPath.startsWith("search/350_point_in_time") || testCandidateTestPath.equals("async_search/20-with-poin-in-time/Async search with point in time")) { return false; } diff --git a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java index 1bb2116cc680a..63860c6355630 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java +++ b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java @@ -175,8 +175,12 @@ void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int r ) ) { SearchResponse searchResponse = SearchResponse.fromXContent(parser); - ElasticsearchAssertions.assertNoFailures(searchResponse); - ElasticsearchAssertions.assertHitCount(searchResponse, expectedDocs); + try { + ElasticsearchAssertions.assertNoFailures(searchResponse); + ElasticsearchAssertions.assertHitCount(searchResponse, expectedDocs); + } finally { + searchResponse.decRef(); + } } } catch (IOException e) { throw new UncheckedIOException(e); diff --git 
a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 3279777c793ba..b17b81b6ac188 100644 --- a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -12,26 +12,20 @@ import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -45,13 +39,12 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; -import org.junit.AfterClass; -import org.junit.Before; import java.io.IOException; import java.util.Collections; @@ -62,28 +55,13 @@ import java.util.concurrent.TimeUnit; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; @SuppressWarnings("removal") public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { - private static RestHighLevelClient restHighLevelClient; - private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); - @Before - public void initHighLevelClient() throws IOException { - super.initClient(); - if (restHighLevelClient == null) { - restHighLevelClient = new HighLevelClient(client()); - } - } - - @AfterClass - public static void cleanupClient() throws IOException { - restHighLevelClient.close(); - restHighLevelClient = null; - } - @Override public void tearDown() throws Exception { super.tearDown(); @@ -103,7 +81,7 @@ private static 
MockTransportService startTransport( MockTransportService newService = MockTransportService.createNewService(s, version, transportVersion, threadPool, null); try { newService.registerRequestHandler( - SearchShardsAction.NAME, + TransportSearchShardsAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchShardsRequest::new, (request, channel, task) -> { @@ -111,7 +89,7 @@ private static MockTransportService startTransport( } ); newService.registerRequestHandler( - SearchAction.NAME, + TransportSearchAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchRequest::new, (request, channel, task) -> { @@ -176,57 +154,74 @@ public void testSearchSkipUnavailable() throws IOException { updateRemoteClusterSettings(Collections.singletonMap("seeds", remoteNode.getAddress().toString())); for (int i = 0; i < 10; i++) { - restHighLevelClient.index(new IndexRequest("index").id(String.valueOf(i)).source("field", "value"), RequestOptions.DEFAULT); + Request request = new Request("POST", "/index/_doc"); + request.setJsonEntity("{ \"field\" : \"value\" }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); } Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index"), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, response.getClusters()); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(2, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); 
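// [editor's note, not part of the original patch] With the RestHighLevelClient removed, these
// assertions read the raw JSON response body through ObjectPath. The "_clusters" section only
// appears in responses to cross-cluster requests, which is why the local-only search above
// asserts that it is null. A condensed sketch of the access pattern used throughout this test:
//
//     Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search"));
//     ObjectPath objectPath = ObjectPath.createFromResponse(response);
//     int total = objectPath.evaluate("_clusters.total");       // dot-separated path into the JSON
//     int hits = objectPath.evaluateArraySize("hits.hits");     // array length at a path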
+ assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); - assertEquals(1, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(0, response.getHits().getTotalHits().value); + Response response = client().performRequest(new Request("GET", "/remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(0)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(0)); } { - SearchResponse response = restHighLevelClient.search( - new SearchRequest("index", "remote1:index").scroll("1m"), - RequestOptions.DEFAULT - ); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(2, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); - String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.scroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); - assertEquals(10, scrollResponse.getHits().getTotalHits().value); - assertEquals(0, scrollResponse.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + 
assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); + String scrollId = objectPath.evaluate("_scroll_id"); + assertNotNull(scrollId); + Request scrollRequest = new Request("POST", "/_search/scroll"); + scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }"); + Response scrollResponse = client().performRequest(scrollRequest); + assertEquals(200, scrollResponse.getStatusLine().getStatusCode()); + ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse); + assertNull(scrollObjectPath.evaluate("_clusters")); + assertThat(scrollObjectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(scrollObjectPath.evaluateArraySize("hits.hits"), equalTo(0)); } remoteTransport.close(); @@ -234,45 +229,57 @@ public void testSearchSkipUnavailable() throws IOException { updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", true)); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); - assertEquals(1, response.getClusters().getTotal()); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(0, response.getHits().getTotalHits().value); + Response response = client().performRequest(new Request("GET", "/remote1:index/_search")); + assertEquals(200, 
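// [editor's note, not part of the original patch] The scroll conversions in this test (one
// appears just below) keep the two-step shape of the old high-level-client code: take
// "_scroll_id" from the first page, then continue via POST /_search/scroll. The tests assert
// that "_clusters" is absent from scroll continuations, mirroring the old
// assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()) check. Roughly:
//
//     String scrollId = objectPath.evaluate("_scroll_id");
//     Request scrollRequest = new Request("POST", "/_search/scroll");
//     scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }");
//     Response page = client().performRequest(scrollRequest);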
response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(0)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(0)); } { - SearchResponse response = restHighLevelClient.search( - new SearchRequest("index", "remote1:index").scroll("1m"), - RequestOptions.DEFAULT - ); - assertEquals(2, response.getClusters().getTotal()); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertEquals(1, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); - assertEquals(0, response.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); - String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.scroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); - assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); - assertEquals(10, scrollResponse.getHits().getTotalHits().value); - assertEquals(0, scrollResponse.getHits().getHits().length); + Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m")); + assertEquals(200, response.getStatusLine().getStatusCode()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertNotNull(objectPath.evaluate("_clusters")); + assertThat(objectPath.evaluate("_clusters.total"), equalTo(2)); + assertThat(objectPath.evaluate("_clusters.successful"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(1)); + assertThat(objectPath.evaluate("_clusters.running"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0)); + assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0)); + assertThat(objectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(10)); + String scrollId = objectPath.evaluate("_scroll_id"); + assertNotNull(scrollId); + Request scrollRequest = new Request("POST", "/_search/scroll"); + scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }"); + Response scrollResponse = client().performRequest(scrollRequest); + assertEquals(200, scrollResponse.getStatusLine().getStatusCode()); + ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse); + assertNull(scrollObjectPath.evaluate("_clusters")); + assertThat(scrollObjectPath.evaluate("hits.total.value"), equalTo(10)); + assertThat(scrollObjectPath.evaluateArraySize("hits.hits"), equalTo(0)); } updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", false)); @@ -344,28 +351,25 @@ public void testSkipUnavailableDependsOnSeeds() throws 
IOException { private static void assertSearchConnectFailure() { { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/index,remote1:index/_search")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/remote1:index/_search")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } { - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index").scroll("1m"), RequestOptions.DEFAULT) + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("POST", "/remote1:index/_search?scroll=1m")) ); - ElasticsearchException rootCause = (ElasticsearchException) exception.getRootCause(); - assertThat(rootCause.getMessage(), containsString("connect_exception")); + assertThat(exception.getMessage(), containsString("connect_exception")); } } @@ -399,12 +403,6 @@ private static HttpEntity buildUpdateSettingsRequestBody(Map set return new NStringEntity(requestBody, ContentType.APPLICATION_JSON); } - private static class HighLevelClient extends RestHighLevelClient { - private HighLevelClient(RestClient restClient) { - super(restClient, (client) -> {}, Collections.emptyList()); - } - } - @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("admin", new SecureString("admin-password".toCharArray())); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 6af9bc9b11723..e5bc4a729f8b1 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; @@ -37,6 +38,7 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.transport.Compression; import org.elasticsearch.xcontent.XContentBuilder; import 
org.elasticsearch.xcontent.XContentParser; @@ -68,7 +70,6 @@ import static java.util.Collections.singletonMap; import static java.util.stream.Collectors.toList; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; -import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.test.MapMatcher.assertMap; @@ -889,7 +890,7 @@ public void testRecovery() throws Exception { if (isRunningAgainstOldCluster()) { count = between(200, 300); Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; @@ -941,7 +942,7 @@ public void testSnapshotRestore() throws IOException { // Create the index count = between(200, 300); Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -1435,7 +1436,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { public void testOperationBasedRecovery() throws Exception { if (isRunningAgainstOldCluster()) { Settings.Builder settings = indexSettings(1, 1); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; @@ -1498,7 +1499,7 @@ public void testResize() throws Exception { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); } final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; @@ -1619,7 +1620,7 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets // upgraded properly. If we're already on 8.x, skip this part of the test. 
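// [editor's note, not part of the original patch] The recurring theme in these
// FullClusterRestartIT hunks: probes of the node release version, such as
// minimumNodeVersion().before(Version.V_8_0_0), are replaced by index-version constants
// (minimumIndexVersion().before(IndexVersions.V_8_0_0)) or, as in the next hunk, by
// cluster-feature checks. This reads as decoupling BWC test gating from release numbering,
// for example:
//
//     if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) {
//         // pre-enforcement cluster: an alias may still span system and non-system indices
//     }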
- if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { + if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) { // Create an alias to make sure it gets upgraded properly Request putAliasRequest = new Request("POST", "/_aliases"); putAliasRequest.setJsonEntity(""" diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 5255cbf401c9a..9c5415f1d5ea9 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -17,31 +17,18 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.bulk.BulkProcessor2; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.client.asyncsearch.AsyncSearchResponse; +import org.elasticsearch.client.ResponseListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -55,9 +42,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -77,22 +62,22 @@ import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; import org.elasticsearch.search.suggest.phrase.DirectCandidateGeneratorBuilder; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestion; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; -import org.elasticsearch.search.suggest.term.TermSuggestion; import 
org.elasticsearch.search.suggest.term.TermSuggestionBuilder; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; -import org.junit.AfterClass; +import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.URLEncoder; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; @@ -100,7 +85,6 @@ import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -110,16 +94,14 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; -import static java.util.stream.Collectors.toList; +import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.Matchers.empty; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.not; /** * This test class executes twice, first against the remote cluster, and then against another cluster that has the remote cluster @@ -137,13 +119,13 @@ public class CCSDuelIT extends ESRestTestCase { private static final String REMOTE_INDEX_NAME = "my_remote_cluster:" + INDEX_NAME; private static final String[] TAGS = new String[] { "java", "xml", "sql", "html", "php", "ruby", "python", "perl" }; - private static RestHighLevelClient restHighLevelClient; + private static boolean init = false; @Before public void init() throws Exception { super.initClient(); - if (restHighLevelClient == null) { - restHighLevelClient = new HighLevelClient(client()); + if (init == false) { + init = true; String destinationCluster = System.getProperty("tests.rest.suite"); // we index docs with private randomness otherwise the two clusters end up with exactly the same documents // given that this test class is run twice with same seed. 
@@ -155,18 +137,6 @@ public void init() throws Exception { } } - private static class HighLevelClient extends RestHighLevelClient { - private HighLevelClient(RestClient restClient) { - super(restClient, (client) -> {}, Collections.emptyList()); - } - } - - @AfterClass - public static void cleanupClient() throws IOException { - IOUtils.close(restHighLevelClient); - restHighLevelClient = null; - } - @Override protected boolean preserveIndicesUponCompletion() { return true; @@ -177,14 +147,13 @@ protected boolean preserveDataStreamsUponCompletion() { return true; } - private static void indexDocuments(String idPrefix) throws IOException, InterruptedException { + private void indexDocuments(String idPrefix) throws IOException, InterruptedException { // this index with a single document is used to test partial failures - IndexRequest indexRequest = new IndexRequest(INDEX_NAME + "_err"); - indexRequest.id("id"); - indexRequest.source("id", "id", "creationDate", "err"); - indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - IndexResponse indexResponse = restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(201, indexResponse.status().getStatus()); + Request request = new Request("POST", "/" + INDEX_NAME + "_err/_doc"); + request.addParameter("refresh", "wait_for"); + request.setJsonEntity("{ \"id\" : \"id\", \"creationDate\" : \"err\" }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); ElasticsearchAssertions.assertAcked(createIndex(INDEX_NAME + "_empty")); @@ -209,82 +178,98 @@ private static void indexDocuments(String idPrefix) throws IOException, Interrup }"""; ElasticsearchAssertions.assertAcked(createIndex(INDEX_NAME, settings, mapping)); - BulkProcessor2 bulkProcessor = BulkProcessor2.builder( - (r, l) -> restHighLevelClient.bulkAsync(r, RequestOptions.DEFAULT, l), - new BulkProcessor2.Listener() { - @Override - public void beforeBulk(long executionId, BulkRequest request) {} - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - assertFalse(response.hasFailures()); - } - - @Override - public void afterBulk(long executionId, BulkRequest request, Exception failure) { - throw new AssertionError("Failed to execute bulk", failure); - } - }, - new DeterministicTaskQueue(random()).getThreadPool() - ).build(); + CountDownLatch latch = new CountDownLatch(2); int numQuestions = randomIntBetween(50, 100); - for (int i = 0; i < numQuestions; i++) { - bulkProcessor.add(buildIndexRequest(idPrefix + i, "question", null)); + { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < numQuestions; i++) { + buildIndexRequest(builder, idPrefix + i, "question", null); + } + executeBulkAsync(builder.toString(), latch); } - int numAnswers = randomIntBetween(100, 150); - for (int i = 0; i < numAnswers; i++) { - bulkProcessor.add(buildIndexRequest(idPrefix + (i + 1000), "answer", idPrefix + randomIntBetween(0, numQuestions - 1))); + { + StringBuilder builder = new StringBuilder(); + int numAnswers = randomIntBetween(100, 150); + for (int i = 0; i < numAnswers; i++) { + buildIndexRequest(builder, idPrefix + (i + 1000), "answer", idPrefix + randomIntBetween(0, numQuestions - 1)); + } + executeBulkAsync(builder.toString(), latch); } - assertTrue(bulkProcessor.awaitClose(30, TimeUnit.SECONDS)); + + assertTrue(latch.await(30, TimeUnit.SECONDS)); RefreshResponse refreshResponse = refresh(INDEX_NAME); 
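// [editor's note, not part of the original patch] The BulkProcessor2 removal above replaces
// the processor with hand-built _bulk bodies sent through the low-level REST client. The bulk
// API expects NDJSON: one action line plus one source line per document, each terminated by a
// newline, which is exactly what buildIndexRequest (below) appends. A minimal sketch with
// hypothetical index and id values:
//
//     StringBuilder builder = new StringBuilder();
//     builder.append("{ \"index\" : { \"_index\" : \"my-index\", \"_id\" : \"1\" } }\n"); // action line
//     builder.append("{ \"field\" : \"value\" }\n");                                      // source line
//     Request bulk = new Request("POST", "/_bulk");
//     bulk.setJsonEntity(builder.toString());
//     Response response = client().performRequest(bulk);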
ElasticsearchAssertions.assertNoFailures(refreshResponse); } - private static IndexRequest buildIndexRequest(String id, String type, String questionId) { - IndexRequest indexRequest = new IndexRequest(INDEX_NAME); - indexRequest.id(id); + private void executeBulkAsync(String body, CountDownLatch latch) { + Request bulk = new Request("POST", "/_bulk"); + bulk.setJsonEntity(body); + client().performRequestAsync(bulk, new ResponseListener() { + @Override + public void onSuccess(Response response) { + try { + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertThat(objectPath.evaluate("errors"), Matchers.equalTo(false)); + } catch (IOException ioException) { + throw new UncheckedIOException(ioException); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception exception) { + try { + fail(exception.getMessage()); + } finally { + latch.countDown(); + } + } + }); + } + + private static void buildIndexRequest(StringBuilder buffer, String id, String type, String questionId) { + // { "index" : { "_index" : "test", "_id" : "1" } }/n + buffer.append("{ \"index\" : { \"_index\" : \"").append(INDEX_NAME).append("\", \"_id\" : \"").append(id).append("\""); if (questionId != null) { - indexRequest.routing(questionId); + buffer.append(", \"routing\" : \"").append(questionId).append("\""); } - indexRequest.create(true); + buffer.append(" } }\n"); int numTags = randomIntBetween(1, 3); Set tags = new HashSet<>(); if (questionId == null) { for (int i = 0; i < numTags; i++) { - tags.add(randomFrom(TAGS)); + tags.add("\"" + randomFrom(TAGS) + "\""); } } String[] tagsArray = tags.toArray(new String[0]); String date = LocalDate.of(2019, 1, randomIntBetween(1, 31)).format(DateTimeFormatter.ofPattern("yyyy/MM/dd", Locale.ROOT)); - Map joinField = new HashMap<>(); - joinField.put("name", type); + + buffer.append("{ "); + buffer.append("\"id\" : \"").append(id).append("\","); + buffer.append("\"type\" : \"").append(type).append("\","); + buffer.append("\"votes\" : ").append(randomIntBetween(0, 30)).append(","); if (questionId != null) { - joinField.put("parent", questionId); - } - indexRequest.source( - XContentType.JSON, - "id", - id, - "type", - type, - "votes", - randomIntBetween(0, 30), - "questionId", - questionId, - "tags", - tagsArray, - "user", - "user" + randomIntBetween(1, 10), - "suggest", - Collections.singletonMap("input", tagsArray), - "creationDate", - date, - "join", - joinField - ); - return indexRequest; + buffer.append("\"questionId\" : \"").append(questionId).append("\","); + } else { + buffer.append("\"questionId\" : ").append(questionId).append(","); + } + buffer.append("\"tags\" : [").append(String.join(",", Arrays.asList(tagsArray))).append("],"); + buffer.append("\"user\" : \"").append("user").append(randomIntBetween(1, 10)).append("\","); + buffer.append("\"suggest\" : ") + .append("{") + .append("\"input\" : [") + .append(String.join(",", Arrays.asList(tagsArray))) + .append("]},"); + buffer.append("\"creationDate\" : \"").append(date).append("\","); + buffer.append("\"join\" : {"); + buffer.append("\"name\" : \"").append(type).append("\""); + if (questionId != null) { + buffer.append(", \"parent\" : \"").append(questionId).append("\""); + } + buffer.append("}}\n"); } public void testMatchAll() throws Exception { @@ -376,9 +361,9 @@ public void testHighlighting() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.highlighter(new HighlightBuilder().field("tags")); 
sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertFalse(response.getHits().getHits()[0].getHighlightFields().isEmpty()); + assertFalse(response.evaluateMapKeys("hits.hits.0.highlight").isEmpty()); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -398,9 +383,9 @@ public void testFetchSource() throws Exception { sourceBuilder.fetchSource(new String[] { "tags" }, Strings.EMPTY_ARRAY); sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getSourceAsMap().size()); + assertThat(response.evaluateMapKeys("hits.hits.0._source").size(), equalTo(1)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -419,10 +404,10 @@ public void testDocValueFields() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.docValueField("user.keyword"); sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getFields().size()); - assertNotNull(response.getHits().getHits()[0].getFields().get("user.keyword")); + assertThat(response.evaluateMapKeys("hits.hits.0.fields").size(), equalTo(1)); + assertTrue(response.evaluateMapKeys("hits.hits.0.fields").contains("user.keyword")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -440,10 +425,10 @@ public void testScriptFields() throws Exception { assumeMultiClusterSetup(); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.scriptField("parent", new Script(ScriptType.INLINE, "painless", "doc['join#question']", Collections.emptyMap())); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(1, response.getHits().getHits()[0].getFields().size()); - assertNotNull(response.getHits().getHits()[0].getFields().get("parent")); + assertThat(response.evaluateMapKeys("hits.hits.0.fields").size(), equalTo(1)); + assertTrue(response.evaluateMapKeys("hits.hits.0.fields").contains("parent")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -462,9 +447,9 @@ public void testExplain() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.explain(true); sourceBuilder.query(QueryBuilders.matchQuery("tags", "sql")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertNotNull(response.getHits().getHits()[0].getExplanation()); + assertNotNull(response.evaluate("hits.hits.0._explanation")); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -486,7 +471,6 @@ public void testRescore() throws Exception { rescorerBuilder.setScoreMode(QueryRescoreMode.Multiply); rescorerBuilder.setRescoreQueryWeight(5); sourceBuilder.addRescorer(rescorerBuilder); - { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); @@ -541,13 +525,18 @@ public void testProfile() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.profile(true); sourceBuilder.query(QueryBuilders.matchQuery("tags", "html")); 
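// [editor's note, not part of the original patch] Throughout CCSDuelIT the response checkers
// switch from typed SearchResponse accessors to raw-JSON paths. Judging from the new
// org.elasticsearch.core.CheckedConsumer and ObjectPath imports and the evaluate usage, the
// checker type is presumably CheckedConsumer<ObjectPath, IOException> (ObjectPath.evaluate
// declares IOException), e.g.:
//
//     CheckedConsumer<ObjectPath, IOException> responseChecker = response -> {
//         assertFalse(response.evaluateMapKeys("hits.hits.0.highlight").isEmpty());
//     };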
- Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertFalse(response.getProfileResults().isEmpty()); - assertThat( - response.getProfileResults().values().stream().filter(sr -> sr.getFetchPhase() != null).collect(toList()), - not(empty()) - ); + assertFalse(response.evaluateMapKeys("profile").isEmpty()); + int size = response.evaluateArraySize("profile.shards"); + boolean fail = true; + for (int i = 0; i < size; i++) { + if (response.evaluate("profile.shards." + i + ".fetch") != null) { + fail = false; + break; + } + } + assertFalse("profile might be incomplete", fail); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -570,10 +559,11 @@ public void testSortByField() throws Exception { sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response, 30); - if (response.getHits().getTotalHits().value > 30) { - assertEquals(3, response.getHits().getHits()[0].getSortValues().length); + int total = response.evaluate("hits.total.value"); + if (total > 30) { + assertThat(response.evaluateArraySize("hits.hits.0.sort"), equalTo(3)); } }; { @@ -597,16 +587,16 @@ public void testSortByFieldOneClusterHasNoResults() throws Exception { sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - SearchHit[] hits = response.getHits().getHits(); - for (SearchHit hit : hits) { - assertEquals(3, hit.getSortValues().length); - assertEquals(INDEX_NAME, hit.getIndex()); + int size = response.evaluateArraySize("hits.hits"); + for (int i = 0; i < size; i++) { + String hit = "hits.hits." + i; + assertThat(response.evaluateArraySize(hit + ".sort"), equalTo(3)); if (onlyRemote) { - assertEquals("my_remote_cluster", hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(REMOTE_INDEX_NAME)); } else { - assertNull(hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(INDEX_NAME)); } } }; @@ -621,14 +611,15 @@ public void testFieldCollapsingOneClusterHasNoResults() throws Exception { boolean onlyRemote = randomBoolean(); sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME)); sourceBuilder.collapse(new CollapseBuilder("user.keyword")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - for (SearchHit hit : response.getHits().getHits()) { - assertEquals(INDEX_NAME, hit.getIndex()); + int size = response.evaluateArraySize("hits.hits"); + for (int i = 0; i < size; i++) { + String hit = "hits.hits." 
+ i; if (onlyRemote) { - assertEquals("my_remote_cluster", hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(REMOTE_INDEX_NAME)); } else { - assertNull(hit.getClusterAlias()); + assertThat(response.evaluate(hit + "._index"), equalTo(INDEX_NAME)); } } }; @@ -661,9 +652,9 @@ public void testFieldCollapsingSortByField() throws Exception { sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort(new ScoreSortBuilder()); sourceBuilder.collapse(new CollapseBuilder("user.keyword")); - Consumer responseChecker = response -> { + CheckedConsumer responseChecker = response -> { assertHits(response); - assertEquals(2, response.getHits().getHits()[0].getSortValues().length); + assertThat(response.evaluateArraySize("hits.hits.0.sort"), equalTo(2)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); @@ -804,7 +795,7 @@ public void testPipelineAggs() throws Exception { searchRequest.source(sourceBuilder); duelRequest(searchRequest, response -> { assertAggs(response); - assertNotNull(response.getAggregations().get("most_voted")); + assertTrue(response.evaluateMapKeys("aggregations").contains("bucket_metric_value#most_voted")); }); duelRequest(searchRequest, CCSDuelIT::assertAggs); } @@ -813,7 +804,7 @@ public void testPipelineAggs() throws Exception { searchRequest.source(sourceBuilder); duelRequest(searchRequest, response -> { assertAggs(response); - assertNotNull(response.getAggregations().get("most_voted")); + assertTrue(response.evaluateMapKeys("aggregations").contains("bucket_metric_value#most_voted")); }); duelRequest(searchRequest, CCSDuelIT::assertAggs); } @@ -847,12 +838,12 @@ public void testTopHits() throws Exception { public void testTermsLookup() throws Exception { assumeMultiClusterSetup(); - IndexRequest indexRequest = new IndexRequest("lookup_index"); - indexRequest.id("id"); - indexRequest.source("tags", new String[] { "java", "sql", "html", "jax-ws" }); - indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - IndexResponse indexResponse = restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT); - assertEquals(201, indexResponse.status().getStatus()); + Request request = new Request("POST", "/lookup_index/_doc/id"); + request.addParameter("refresh", "wait_for"); + request.setJsonEntity("{ \"tags\" : [ \"java\", \"sql\", \"html\", \"jax-ws\" ] }"); + Response response = client().performRequest(request); + assertEquals(201, response.getStatusLine().getStatusCode()); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); TermsQueryBuilder termsQueryBuilder = new TermsQueryBuilder("tags", new TermsLookup("lookup_index", "id", "tags")); sourceBuilder.query(termsQueryBuilder); @@ -879,11 +870,11 @@ public void testShardFailures() throws Exception { boolean compareAsyncAndSyncResponses = false; duelRequest(searchRequest, response -> { assertMultiClusterSearchResponse(response); - assertThat(response.getHits().getTotalHits().value, greaterThan(0L)); - assertNull(response.getAggregations()); - assertNull(response.getSuggest()); - assertThat(response.getHits().getHits().length, greaterThan(0)); - assertThat(response.getFailedShards(), greaterThanOrEqualTo(2)); + assertThat(response.evaluate("hits.total.value"), greaterThan(0)); + assertNull(response.evaluate("aggregations")); + assertNull(response.evaluate("suggest")); + assertThat(response.evaluateArraySize("hits.hits"), greaterThan(0)); + assertThat(response.evaluate("_shards.failed"), greaterThanOrEqualTo(2)); }, compareAsyncAndSyncResponses); } @@ 
-894,24 +885,21 @@ public void testTermSuggester() throws Exception { suggestBuilder.setGlobalText("jva hml"); suggestBuilder.addSuggestion("tags", new TermSuggestionBuilder("tags").suggestMode(TermSuggestionBuilder.SuggestMode.POPULAR)); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(1, response.getSuggest().size()); - TermSuggestion tags = response.getSuggest().getSuggestion("tags"); - assertThat(tags.getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertThat(response.evaluateMapKeys("suggest").size(), equalTo(1)); + assertThat(response.evaluateArraySize("suggest.term#tags"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -926,24 +914,21 @@ public void testPhraseSuggester() throws Exception { .highlight("", "") ); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(1, response.getSuggest().size()); - PhraseSuggestion tags = response.getSuggest().getSuggestion("tags"); - assertThat(tags.getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertEquals(1, response.evaluateMapKeys("suggest").size()); + assertThat(response.evaluateArraySize("suggest.phrase#tags"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); // suggest-only queries are not supported by _async_search, so only test against sync search API - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -955,25 +940,23 @@ public void testCompletionSuggester() throws Exception { suggestBuilder.addSuggestion("java", new CompletionSuggestionBuilder("suggest").size(20).text("jav")); suggestBuilder.addSuggestion("ruby", new CompletionSuggestionBuilder("suggest").size(30).text("rub")); sourceBuilder.suggest(suggestBuilder); - Consumer responseChecker = response -> { - assertEquals(Strings.toString(response, true, true), 3, response.getSuggest().size()); - assertThat(response.getSuggest().getSuggestion("python").getEntries().size(), greaterThan(0)); - 
assertThat(response.getSuggest().getSuggestion("java").getEntries().size(), greaterThan(0)); - assertThat(response.getSuggest().getSuggestion("ruby").getEntries().size(), greaterThan(0)); + CheckedConsumer responseChecker = response -> { + assertThat(response.evaluateMapKeys("suggest").size(), equalTo(3)); + assertThat(response.evaluateArraySize("suggest.completion#python"), greaterThan(0)); + assertThat(response.evaluateArraySize("suggest.completion#java"), greaterThan(0)); + assertThat(response.evaluateArraySize("suggest.completion#ruby"), greaterThan(0)); }; { SearchRequest searchRequest = initLocalAndRemoteSearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertMultiClusterSearchResponse)); } { SearchRequest searchRequest = initRemoteOnlySearchRequest(); searchRequest.source(sourceBuilder); - responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse); // suggest-only queries are not supported by _async_search, so only test against sync search API - duelSearchSync(searchRequest, responseChecker); + duelSearchSync(searchRequest, responseChecker.andThen(CCSDuelIT::assertSingleRemoteClusterSearchResponse)); } } @@ -992,7 +975,7 @@ private static SearchRequest initLocalAndRemoteSearchRequest() { } private static SearchRequest initRemoteOnlySearchRequest() { - List indices = Arrays.asList("my_remote_cluster:" + INDEX_NAME); + List indices = List.of("my_remote_cluster:" + INDEX_NAME); final SearchRequest request = new SearchRequest(indices.toArray(new String[0])); if (randomBoolean()) { request.setPreFilterShardSize(between(1, 20)); @@ -1000,12 +983,15 @@ private static SearchRequest initRemoteOnlySearchRequest() { return request; } - private void duelRequest(SearchRequest searchRequest, Consumer responseChecker) throws Exception { + private void duelRequest(SearchRequest searchRequest, CheckedConsumer responseChecker) throws Exception { duelRequest(searchRequest, responseChecker, true); } - private void duelRequest(SearchRequest searchRequest, Consumer responseChecker, boolean compareAsyncToSyncResponses) - throws Exception { + private void duelRequest( + SearchRequest searchRequest, + CheckedConsumer responseChecker, + boolean compareAsyncToSyncResponses + ) throws Exception { Map syncResponseMap = duelSearchSync(searchRequest, responseChecker); Map asyncResponseMap = duelSearchAsync(searchRequest, responseChecker); if (compareAsyncToSyncResponses) { @@ -1016,26 +1002,17 @@ private void duelRequest(SearchRequest searchRequest, Consumer r /** * @return responseMap from one of the Synchronous Search Requests */ - private static Map duelSearchSync(SearchRequest searchRequest, Consumer responseChecker) + private static Map duelSearchSync(SearchRequest searchRequest, CheckedConsumer responseChecker) throws Exception { CountDownLatch latch = new CountDownLatch(2); AtomicReference exception1 = new AtomicReference<>(); - AtomicReference minimizeRoundtripsResponse = new AtomicReference<>(); + AtomicReference minimizeRoundtripsResponse = new AtomicReference<>(); searchRequest.setCcsMinimizeRoundtrips(true); - restHighLevelClient.searchAsync( - searchRequest, - RequestOptions.DEFAULT, - new LatchedActionListener<>(ActionListener.wrap(minimizeRoundtripsResponse::set, exception1::set), latch) - ); - + 
submitSyncSearch(searchRequest, minimizeRoundtripsResponse, exception1, latch); AtomicReference exception2 = new AtomicReference<>(); - AtomicReference fanOutResponse = new AtomicReference<>(); + AtomicReference fanOutResponse = new AtomicReference<>(); searchRequest.setCcsMinimizeRoundtrips(false); - restHighLevelClient.searchAsync( - searchRequest, - RequestOptions.DEFAULT, - new LatchedActionListener<>(ActionListener.wrap(fanOutResponse::set, exception2::set), latch) - ); + submitSyncSearch(searchRequest, fanOutResponse, exception2, latch); latch.await(); @@ -1049,8 +1026,7 @@ private static Map duelSearchSync(SearchRequest searchRequest, C if (exception2.get() != null) { throw new AssertionError("one of the two requests returned an exception", exception2.get()); } - SearchResponse minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.get(); - + ObjectPath minimizeRoundtripsSearchResponse = ObjectPath.createFromResponse(minimizeRoundtripsResponse.get()); responseChecker.accept(minimizeRoundtripsSearchResponse); // if only the remote cluster was searched, then only one reduce phase is expected @@ -1058,133 +1034,160 @@ private static Map duelSearchSync(SearchRequest searchRequest, C if (searchRequest.indices().length > 1) { expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; } - - assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); - SearchResponse fanOutSearchResponse = fanOutResponse.get(); + if (expectedReducePhasesMinRoundTrip == 1) { + assertThat( + minimizeRoundtripsSearchResponse.evaluate("num_reduce_phases"), + anyOf(equalTo(expectedReducePhasesMinRoundTrip), nullValue()) + ); + } else { + assertThat(minimizeRoundtripsSearchResponse.evaluate("num_reduce_phases"), equalTo(expectedReducePhasesMinRoundTrip)); + } + ObjectPath fanOutSearchResponse = ObjectPath.createFromResponse(fanOutResponse.get()); responseChecker.accept(fanOutSearchResponse); - assertEquals(1, fanOutSearchResponse.getNumReducePhases()); + assertThat(fanOutSearchResponse.evaluate("num_reduce_phases"), anyOf(equalTo(1), nullValue())); // default value is 1? 
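The branching above encodes the reduce-phase bookkeeping the two CCS modes are expected to report: with `ccs_minimize_roundtrips=true` each searched cluster reduces its own results and the coordinator adds a final reduce, while a fan-out search reduces exactly once; the `nullValue()` branch appears to allow the REST body to omit `num_reduce_phases` when only the default single reduce happened. A small sketch of the expectation as I read these assertions (not library code):

```java
// Expected num_reduce_phases for a cross-cluster search, per the assertions above.
// indexExpressions is searchRequest.indices().length; the test approximates
// "clusters searched" by the number of index expressions.
static int expectedReducePhases(int indexExpressions, boolean minimizeRoundtrips) {
    if (minimizeRoundtrips == false || indexExpressions == 1) {
        return 1; // single reduce; the field may be omitted from the JSON body entirely
    }
    return indexExpressions + 1; // one reduce per searched cluster plus the final coordinator reduce
}
```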
         // compare Clusters objects
-        SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters();
-        SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters();
-
-        assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal());
-        assertEquals(
-            clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL),
-            clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL)
+        assertThat(
+            minimizeRoundtripsSearchResponse.evaluate("_clusters.total"),
+            equalTo(fanOutSearchResponse.evaluate("_clusters.total"))
         );
-        assertEquals(
-            clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED),
-            clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)
+        assertThat(
+            minimizeRoundtripsSearchResponse.evaluate("_clusters.successful"),
+            equalTo(fanOutSearchResponse.evaluate("_clusters.successful"))
         );
-        assertEquals(
-            clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING),
-            clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)
+        assertThat(
+            minimizeRoundtripsSearchResponse.evaluate("_clusters.skipped"),
+            equalTo(fanOutSearchResponse.evaluate("_clusters.skipped"))
         );
-        assertEquals(
-            clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL),
-            clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)
+        assertThat(
+            minimizeRoundtripsSearchResponse.evaluate("_clusters.running"),
+            equalTo(fanOutSearchResponse.evaluate("_clusters.running"))
         );
-        assertEquals(
-            clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED),
-            clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED)
+        assertThat(
+            minimizeRoundtripsSearchResponse.evaluate("_clusters.partial"),
+            equalTo(fanOutSearchResponse.evaluate("_clusters.partial"))
+        );
+        assertThat(
+            minimizeRoundtripsSearchResponse.evaluate("_clusters.failed"),
+            equalTo(fanOutSearchResponse.evaluate("_clusters.failed"))
         );
 
         Map<String, Object> minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse);
-        if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) {
+        if (minimizeRoundtripsSearchResponse.evaluate("_clusters") != null && fanOutSearchResponse.evaluate("_clusters") != null) {
             Map<String, Object> fanOutResponseMap = responseToMap(fanOutSearchResponse);
             compareResponseMaps(minimizeRoundtripsResponseMap, fanOutResponseMap, "Comparing sync_search minimizeRoundTrip vs.
fanOut"); - assertThat(minimizeRoundtripsSearchResponse.getSkippedShards(), lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards())); + assertThat( + minimizeRoundtripsSearchResponse.evaluate("_shards.skipped"), + lessThanOrEqualTo((Integer) fanOutSearchResponse.evaluate("_shards.skipped")) + ); } return minimizeRoundtripsResponseMap; } } + private static void submitSyncSearch( + SearchRequest searchRequest, + AtomicReference responseRef, + AtomicReference exceptionRef, + CountDownLatch latch + ) throws IOException { + String indices = Strings.collectionToDelimitedString(List.of(searchRequest.indices()), ","); + final Request request = new Request("POST", URLEncoder.encode(indices, StandardCharsets.UTF_8) + "/_search"); + request.addParameter("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + request.addParameter(RestSearchAction.TYPED_KEYS_PARAM, "true"); + request.setEntity(createEntity(searchRequest.source(), XContentType.JSON, ToXContent.EMPTY_PARAMS)); + client().performRequestAsync(request, new ResponseListener() { + @Override + public void onSuccess(Response response) { + try { + responseRef.set(response); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception exception) { + try { + exceptionRef.set(exception); + } finally { + latch.countDown(); + } + } + }); + } + /** * @return responseMap from one of the async searches */ - private static Map duelSearchAsync(SearchRequest searchRequest, Consumer responseChecker) - throws Exception { + private static Map duelSearchAsync( + SearchRequest searchRequest, + CheckedConsumer responseChecker + ) throws Exception { searchRequest.setCcsMinimizeRoundtrips(true); - AsyncSearchResponse minimizeRoundtripsResponse = submitAsyncSearch( - searchRequest, - TimeValue.timeValueSeconds(1), - restHighLevelClient.getParserConfig() - ); + ObjectPath minimizeRoundtripsResponse = submitAsyncSearch(searchRequest, TimeValue.timeValueSeconds(1)); try { - final String responseId = minimizeRoundtripsResponse.getId(); + final String responseId = minimizeRoundtripsResponse.evaluate("id");// minimizeRoundtripsResponse.getId(); assertBusy(() -> { - AsyncSearchResponse resp = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); - assertThat(resp.isRunning(), equalTo(false)); + ObjectPath resp = getAsyncSearch(responseId); + assertThat(resp.evaluate("is_running"), equalTo(false)); }); - minimizeRoundtripsResponse = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); + minimizeRoundtripsResponse = getAsyncSearch(responseId); } finally { - deleteAsyncSearch(minimizeRoundtripsResponse.getId()); + deleteAsyncSearch(minimizeRoundtripsResponse.evaluate("id")); } searchRequest.setCcsMinimizeRoundtrips(false); - AsyncSearchResponse fanOutResponse = submitAsyncSearch( - searchRequest, - TimeValue.timeValueSeconds(1), - restHighLevelClient.getParserConfig() - ); + ObjectPath fanOutResponse = submitAsyncSearch(searchRequest, TimeValue.timeValueSeconds(1)); try { - final String responseId = fanOutResponse.getId(); + final String responseId = fanOutResponse.evaluate("id"); assertBusy(() -> { - AsyncSearchResponse resp = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); - assertThat(resp.isRunning(), equalTo(false)); + ObjectPath resp = getAsyncSearch(responseId); + assertThat(resp.evaluate("is_running"), equalTo(false)); }); - fanOutResponse = getAsyncSearch(responseId, restHighLevelClient.getParserConfig()); + fanOutResponse = getAsyncSearch(responseId); } 
finally { - deleteAsyncSearch(fanOutResponse.getId()); + deleteAsyncSearch(fanOutResponse.evaluate("id")); } - SearchResponse minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.getSearchResponse(); - SearchResponse fanOutSearchResponse = fanOutResponse.getSearchResponse(); - responseChecker.accept(minimizeRoundtripsSearchResponse); + // extract the response + minimizeRoundtripsResponse = new ObjectPath(minimizeRoundtripsResponse.evaluate("response")); + fanOutResponse = new ObjectPath(fanOutResponse.evaluate("response")); + + responseChecker.accept(minimizeRoundtripsResponse); // if only the remote cluster was searched, then only one reduce phase is expected int expectedReducePhasesMinRoundTrip = 1; if (searchRequest.indices().length > 1) { expectedReducePhasesMinRoundTrip = searchRequest.indices().length + 1; } - assertEquals(expectedReducePhasesMinRoundTrip, minimizeRoundtripsSearchResponse.getNumReducePhases()); - - responseChecker.accept(fanOutSearchResponse); - assertEquals(1, fanOutSearchResponse.getNumReducePhases()); - - // compare Clusters objects - SearchResponse.Clusters clustersMRT = minimizeRoundtripsSearchResponse.getClusters(); - SearchResponse.Clusters clustersMRTFalse = fanOutSearchResponse.getClusters(); + if (expectedReducePhasesMinRoundTrip == 1) { + assertThat( + minimizeRoundtripsResponse.evaluate("num_reduce_phases"), + anyOf(equalTo(expectedReducePhasesMinRoundTrip), nullValue()) + ); + } else { + assertThat(minimizeRoundtripsResponse.evaluate("num_reduce_phases"), equalTo(expectedReducePhasesMinRoundTrip)); + } - assertEquals(clustersMRT.getTotal(), clustersMRTFalse.getTotal()); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL) - ); - assertEquals( - clustersMRT.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), - clustersMRTFalse.getClusterStateCount(SearchResponse.Cluster.Status.FAILED) - ); + responseChecker.accept(fanOutResponse); + assertThat(fanOutResponse.evaluate("num_reduce_phases"), anyOf(equalTo(1), nullValue())); // default value is 1? 
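For reference, the async duel above drives the `_async_search` lifecycle directly over the low-level REST client: submit, poll `is_running`, fetch the final result, delete, then unwrap the embedded `response` object. Roughly, under the same assumptions as the earlier sketch (placeholder index and query):

```java
// Sketch of the submit/poll/fetch/delete cycle used by duelSearchAsync.
Request submit = new Request("POST", "/my-index/_async_search");
submit.addParameter("wait_for_completion_timeout", "1s");
submit.setJsonEntity("{ \"query\": { \"match_all\": {} } }");
ObjectPath submitted = ObjectPath.createFromResponse(client().performRequest(submit));
String id = submitted.evaluate("id");
assertBusy(() -> {
    Response status = client().performRequest(new Request("GET", "/_async_search/" + id));
    assertThat(ObjectPath.createFromResponse(status).evaluate("is_running"), equalTo(false));
});
ObjectPath result = ObjectPath.createFromResponse(
    client().performRequest(new Request("GET", "/_async_search/" + id))
);
client().performRequest(new Request("DELETE", "/_async_search/" + id));
ObjectPath searchBody = new ObjectPath(result.evaluate("response")); // the embedded search response
```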
-        Map<String, Object> minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse);
-        if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) {
-            Map<String, Object> fanOutResponseMap = responseToMap(fanOutSearchResponse);
+        assertThat(minimizeRoundtripsResponse.evaluate("_clusters.total"), equalTo(fanOutResponse.evaluate("_clusters.total")));
+        assertThat(minimizeRoundtripsResponse.evaluate("_clusters.successful"), equalTo(fanOutResponse.evaluate("_clusters.successful")));
+        assertThat(minimizeRoundtripsResponse.evaluate("_clusters.skipped"), equalTo(fanOutResponse.evaluate("_clusters.skipped")));
+        assertThat(minimizeRoundtripsResponse.evaluate("_clusters.running"), equalTo(fanOutResponse.evaluate("_clusters.running")));
+        assertThat(minimizeRoundtripsResponse.evaluate("_clusters.partial"), equalTo(fanOutResponse.evaluate("_clusters.partial")));
+        assertThat(minimizeRoundtripsResponse.evaluate("_clusters.failed"), equalTo(fanOutResponse.evaluate("_clusters.failed")));
+        Map<String, Object> minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsResponse);
+        if (minimizeRoundtripsResponse.evaluate("_clusters") != null && fanOutResponse.evaluate("_clusters") != null) {
+            Map<String, Object> fanOutResponseMap = responseToMap(fanOutResponse);
             compareResponseMaps(minimizeRoundtripsResponseMap, fanOutResponseMap, "Comparing async_search minimizeRoundTrip vs. fanOut");
-            assertThat(minimizeRoundtripsSearchResponse.getSkippedShards(), lessThanOrEqualTo(fanOutSearchResponse.getSkippedShards()));
+            assertThat(
+                minimizeRoundtripsResponse.evaluate("_shards.skipped"),
+                lessThanOrEqualTo((Integer) fanOutResponse.evaluate("_shards.skipped"))
+            );
         }
         return minimizeRoundtripsResponseMap;
     }
@@ -1199,11 +1202,7 @@ private static void compareResponseMaps(Map responseMap1, Map from) {
-        assertThat(response.getHits().getHits().length, greaterThan(0));
+        int totalHits = response.evaluate("hits.total.value");
+        assertThat(totalHits, greaterThan(0));
+        assertThat(response.evaluate("_shards.failed"), Matchers.equalTo(0));
+        assertNull(response.evaluate("aggregations"));
+        assertNull(response.evaluate("suggest"));
+        if (totalHits > from) {
+            assertThat(response.evaluateArraySize("hits.hits"), greaterThan(0));
         } else {
-            assertThat(response.getHits().getHits().length, equalTo(0));
+            assertThat(response.evaluateArraySize("hits.hits"), equalTo(0));
         }
     }
 
-    private static void assertAggs(SearchResponse response) {
-        if (response.getClusters().getTotal() == 1) {
+    private static void assertAggs(ObjectPath response) throws IOException {
+        int totalClusters = response.evaluate("_clusters.total");
+        if (totalClusters == 1) {
             assertSingleRemoteClusterSearchResponse(response);
         } else {
             assertMultiClusterSearchResponse(response);
         }
-        assertThat(response.getHits().getTotalHits().value, greaterThan(0L));
-        assertEquals(0, response.getHits().getHits().length);
-        assertNull(response.getSuggest());
-        assertNotNull(response.getAggregations());
-        List<Aggregation> aggregations = response.getAggregations().asList();
-        for (Aggregation aggregation : aggregations) {
-            if (aggregation instanceof MultiBucketsAggregation multiBucketsAggregation) {
+        assertThat(response.evaluate("hits.total.value"), greaterThan(0));
+        assertThat(response.evaluateArraySize("hits.hits"), equalTo(0));
+        assertNull(response.evaluate("suggest"));
+        assertNotNull(response.evaluate("aggregations"));
+        Set<String> aggregations = response.evaluateMapKeys("aggregations");
+        for (String aggregation : aggregations) {
+            if (aggregation.startsWith("date_histogram") || aggregation.startsWith("sterms")) {
                 assertThat(
-                    "agg " + multiBucketsAggregation.getName() + " has 0 buckets",
-                    multiBucketsAggregation.getBuckets().size(),
+                    aggregation + " has 0 buckets",
+                    response.evaluateArraySize("aggregations." + aggregation + ".buckets"),
                     greaterThan(0)
                 );
             }
@@ -1324,8 +1326,8 @@ private static void assertAggs(SearchResponse response) {
     }
 
     @SuppressWarnings("unchecked")
-    private static Map<String, Object> responseToMap(SearchResponse response) throws IOException {
-        BytesReference bytesReference = XContentHelper.toXContent(response, XContentType.JSON, false);
+    private static Map<String, Object> responseToMap(ObjectPath response) throws IOException {
+        BytesReference bytesReference = BytesReference.bytes(response.toXContentBuilder(XContentType.JSON.xContent()));
         Map<String, Object> responseMap = XContentHelper.convertToMap(bytesReference, false, XContentType.JSON).v2();
         assertNotNull(responseMap.put("took", -1));
         responseMap.remove("num_reduce_phases");
diff --git a/qa/packaging/README.md b/qa/packaging/README.md
index 20b4f6efa3a98..f1c556e73e962 100644
--- a/qa/packaging/README.md
+++ b/qa/packaging/README.md
@@ -1,4 +1,4 @@
-# packaging tests
+# Packaging tests
 
 This project contains tests that verify the distributions we build work
 correctly on the operating systems we support. They're intended to cover the
@@ -6,18 +6,11 @@ steps a user would take when installing and configuring an Elasticsearch
 distribution. They're not intended to have significant coverage of the behavior
 of Elasticsearch's features.
 
-There are two types of tests in this project. The old tests live in
-`src/test/` and are written in [Bats](https://github.com/sstephenson/bats),
-which is a flavor of bash scripts that run as unit tests. These tests are
-deprecated because Bats is unmaintained and cannot run on Windows.
-
-The new tests live in `src/main/` and are written in Java. Like the old tests,
-this project's tests are run inside the VM, not on your host. All new packaging
-tests should be added to this set of tests if possible.
-
 ## Running these tests
 
-See the section in [TESTING.asciidoc](../../TESTING.asciidoc#testing-packaging)
+These tests should only be run on ephemeral machines. They will likely
+have undesired side effects on a developer's computer.
+For more information, see the section in [TESTING.asciidoc](../../TESTING.asciidoc#testing-packaging)
 
 ## Adding a new test class
 
diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java
index e426754cd61ee..82c5909c5dfdd 100644
--- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java
+++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java
@@ -211,14 +211,14 @@ public void test50AutoConfigurationFailsWhenCertificatesNotGenerated() throws Ex
         FileUtils.assertPathsDoNotExist(installation.data);
         Path tempDir = createTempDir("bc-backup");
         Files.move(
-            installation.lib.resolve("tools").resolve("security-cli").resolve("bcprov-jdk15on-1.64.jar"),
-            tempDir.resolve("bcprov-jdk15on-1.64.jar")
+            installation.lib.resolve("tools").resolve("security-cli").resolve("bcprov-jdk18on-1.76.jar"),
+            tempDir.resolve("bcprov-jdk18on-1.76.jar")
         );
         Shell.Result result = runElasticsearchStartCommand(null, false, false);
         assertElasticsearchFailure(result, "java.lang.NoClassDefFoundError: org/bouncycastle/", null);
         Files.move(
-            tempDir.resolve("bcprov-jdk15on-1.64.jar"),
-            installation.lib.resolve("tools").resolve("security-cli").resolve("bcprov-jdk15on-1.64.jar")
+            tempDir.resolve("bcprov-jdk18on-1.76.jar"),
+            installation.lib.resolve("tools").resolve("security-cli").resolve("bcprov-jdk18on-1.76.jar")
         );
         Platforms.onWindows(() -> sh.chown(installation.config));
         FileUtils.rm(tempDir);
diff --git a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
index 7af6ad49fb001..43d5ea842f9ef 100644
--- a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
+++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java
@@ -305,7 +305,7 @@ public void testRecovery() throws Exception {
             // before timing out
             .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
             .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
-        if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) {
+        if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) {
             settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
         }
         createIndex(index, settings.build());
@@ -340,7 +340,7 @@ public void testRetentionLeasesEstablishedWhenPromotingPrimary() throws Exceptio
             .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(1, 2)) // triggers nontrivial promotion
             .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
             .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
-        if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) {
+        if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) {
             settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
         }
         createIndex(index, settings.build());
@@ -363,7 +363,7 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti
             .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, 1))
             .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
             .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
-        if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) {
+        if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } createIndex(index, settings.build()); @@ -445,9 +445,12 @@ public void testRecoveryClosedIndex() throws Exception { * time the index was closed. */ public void testCloseIndexDuringRollingUpgrade() throws Exception { - final Version minimumNodeVersion = minimumNodeVersion(); - final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(minimumNodeVersion.id)) - .toLowerCase(Locale.ROOT); + int id = switch (CLUSTER_TYPE) { + case OLD -> 1; + case MIXED -> 2; + case UPGRADED -> 3; + }; + final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(id)).toLowerCase(Locale.ROOT); if (indexExists(indexName) == false) { createIndex( @@ -461,7 +464,7 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumNodeVersion.onOrAfter(Version.V_7_2_0)) { + if (minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)) { // index is created on a version that supports the replication of closed indices, // so we expect the index to be closed and replicated ensureGreen(indexName); @@ -501,7 +504,7 @@ public void testClosedIndexNoopRecovery() throws Exception { if (indexVersionCreated(indexName).onOrAfter(IndexVersions.V_7_2_0)) { // index was created on a version that supports the replication of closed indices, so we expect it to be closed and replicated - assertTrue(minimumNodeVersion().onOrAfter(Version.V_7_2_0)); + assertTrue(minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)); ensureGreen(indexName); assertClosedIndex(indexName, true); if (CLUSTER_TYPE != ClusterType.OLD) { @@ -648,7 +651,7 @@ public void testOperationBasedRecovery() throws Exception { final Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } final String mappings = randomBoolean() ? 
"\"_source\": { \"enabled\": false}" : null; @@ -700,7 +703,6 @@ public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { public void testAutoExpandIndicesDuringRollingUpgrade() throws Exception { final String indexName = "test-auto-expand-filtering"; - final Version minimumNodeVersion = minimumNodeVersion(); Response response = client().performRequest(new Request("GET", "_nodes")); ObjectPath objectPath = ObjectPath.createFromResponse(response); @@ -721,11 +723,7 @@ public void testAutoExpandIndicesDuringRollingUpgrade() throws Exception { final int numberOfReplicas = Integer.parseInt( getIndexSettingsAsMap(indexName).get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS).toString() ); - if (minimumNodeVersion.onOrAfter(Version.V_7_6_0)) { - assertEquals(nodes.size() - 2, numberOfReplicas); - } else { - assertEquals(nodes.size() - 1, numberOfReplicas); - } + assertThat(nodes, hasSize(numberOfReplicas + 2)); } public void testSoftDeletesDisabledWarning() throws Exception { @@ -733,7 +731,7 @@ public void testSoftDeletesDisabledWarning() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { boolean softDeletesEnabled = true; Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { softDeletesEnabled = randomBoolean(); settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), softDeletesEnabled); } diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 440483039256c..d3af5d25b70ff 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -9,7 +9,6 @@ import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-test-artifact-base' apply plugin: 'elasticsearch.bwc-test' diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java index 44ee7f0b56d1c..fbd6ee8aa3759 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java @@ -14,11 +14,11 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.XContentTestUtils.JsonMapView; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; -import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -89,7 +89,7 @@ public void testSystemIndicesUpgrades() throws Exception { // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets // upgraded properly. If we're already on 8.x, skip this part of the test. 
- if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { + if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) { // Create an alias to make sure it gets upgraded properly Request putAliasRequest = new Request("POST", "/_aliases"); putAliasRequest.setJsonEntity(""" diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java index cf76d86c9298f..755bbce93c95b 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java @@ -18,9 +18,11 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import java.util.concurrent.CancellationException; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; import static org.elasticsearch.test.TaskAssertions.assertAllCancellableTasksAreCancelled; @@ -28,6 +30,10 @@ public class ClusterHealthRestCancellationIT extends HttpSmokeTestCase { + @TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/100062", + value = "org.elasticsearch.test.TaskAssertions:TRACE" + ) public void testClusterHealthRestCancellation() throws Exception { final var barrier = new CyclicBarrier(2); @@ -37,7 +43,18 @@ public void testClusterHealthRestCancellation() throws Exception { @Override public ClusterState execute(ClusterState currentState) { safeAwait(barrier); - safeAwait(barrier); + // safeAwait(barrier); + + // temporarily lengthen timeout on safeAwait while investigating #100062 + try { + barrier.await(60, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new AssertionError("unexpected", e); + } catch (Exception e) { + throw new AssertionError("unexpected", e); + } + return currentState; } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpStatsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpStatsIT.java index c582191c085f4..ac1bde443f703 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpStatsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpStatsIT.java @@ -52,7 +52,6 @@ public void testNodeHttpStats() throws IOException { assertHttpStats(new XContentTestUtils.JsonMapView((Map) nodesMap.get(nodeId))); } - @SuppressWarnings("unchecked") public void testClusterInfoHttpStats() throws IOException { internalCluster().ensureAtLeastNumDataNodes(3); performHttpRequests(); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java index 43d7630199bb2..896da65fa83dd 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java @@ -114,7 +114,7 @@ public TimeValue masterNodeTimeout() { } }; - PlainActionFuture 
future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); internalCluster().getAnyMasterNodeInstance(ClusterService.class) .submitUnbatchedStateUpdateTask("get_mappings_cancellation_test", new AckedClusterStateUpdateTask(ackedRequest, future) { @Override diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java index a860b0855e158..73dd1525f8a08 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -15,10 +15,10 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Cancellable; @@ -73,7 +73,7 @@ public void testAutomaticCancellationDuringQueryPhase() throws Exception { scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())) ); searchRequest.setJsonEntity(Strings.toString(searchSource)); - verifyCancellationDuringQueryPhase(SearchAction.NAME, searchRequest); + verifyCancellationDuringQueryPhase(TransportSearchAction.TYPE.name(), searchRequest); } public void testAutomaticCancellationMultiSearchDuringQueryPhase() throws Exception { @@ -89,7 +89,7 @@ public void testAutomaticCancellationMultiSearchDuringQueryPhase() throws Except Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); - verifyCancellationDuringQueryPhase(MultiSearchAction.NAME, restRequest); + verifyCancellationDuringQueryPhase(TransportMultiSearchAction.TYPE.name(), restRequest); } void verifyCancellationDuringQueryPhase(String searchAction, Request searchRequest) throws Exception { @@ -98,7 +98,7 @@ void verifyCancellationDuringQueryPhase(String searchAction, Request searchReque List plugins = initBlockFactory(); indexTestData(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); Cancellable cancellable = getRestClient().performRequestAsync(searchRequest, wrapAsRestResponseListener(future)); awaitForBlock(plugins); @@ -116,7 +116,7 @@ public void testAutomaticCancellationDuringFetchPhase() throws Exception { new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()) ); searchRequest.setJsonEntity(Strings.toString(searchSource)); - verifyCancellationDuringFetchPhase(SearchAction.NAME, searchRequest); + verifyCancellationDuringFetchPhase(TransportSearchAction.TYPE.name(), searchRequest); } public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Exception { @@ -132,7 
+132,7 @@ public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Except Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); - verifyCancellationDuringFetchPhase(MultiSearchAction.NAME, restRequest); + verifyCancellationDuringFetchPhase(TransportMultiSearchAction.TYPE.name(), restRequest); } void verifyCancellationDuringFetchPhase(String searchAction, Request searchRequest) throws Exception { @@ -141,7 +141,7 @@ void verifyCancellationDuringFetchPhase(String searchAction, Request searchReque List plugins = initBlockFactory(); indexTestData(); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); Cancellable cancellable = getRestClient().performRequestAsync(searchRequest, wrapAsRestResponseListener(future)); awaitForBlock(plugins); @@ -186,7 +186,7 @@ private static void indexTestData() { // Make sure we have a few segments BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int j = 0; j < 20; j++) { - bulkRequestBuilder.add(client().prepareIndex("test").setId(Integer.toString(i * 5 + j)).setSource("field", "value")); + bulkRequestBuilder.add(prepareIndex("test").setId(Integer.toString(i * 5 + j)).setSource("field", "value")); } assertNoFailures(bulkRequestBuilder.get()); } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml new file mode 100644 index 0000000000000..6d6ee1f6bed41 --- /dev/null +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -0,0 +1,214 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ingest simulate added in 8.12' + +--- +"Test ingest simulate with reroute": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline-1" + body: > + { + "processors": [ + { + "set": { + "field": "my-pipeline-1-ran", + "value": true + } + }, + { + "reroute": { + "destination": "index-2-a" + } + } + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline-1" + body: > + { + "processors": [ + { + "set": { + "field": "my-final-pipeline-1-ran", + "value": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.put_template: + name: my-template-1 + body: + index_patterns: index-1-* + settings: + default_pipeline: "my-pipeline-1" + final_pipeline: "my-final-pipeline-1" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline-2" + body: > + { + "processors": [ + { + "set": { + "field": "my-pipeline-2-ran", + "value": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline-2" + body: > + { + "processors": [ + { + "set": { + "field": "my-final-pipeline-2-ran", + "value": true + } + }, + { + "uppercase": { + "field": "foo" + } + } + ] + } + - match: { acknowledged: true } + + - do: + indices.put_template: + name: my-template-2 + body: + index_patterns: index-2-* + 
settings:
+          default_pipeline: "my-pipeline-2"
+          final_pipeline: "my-final-pipeline-2"
+
+  - do:
+      headers:
+        Content-Type: application/json
+      simulate.ingest:
+        body: >
+          {
+            "docs": [
+              {
+                "_index": "index-1-a",
+                "_id": "id",
+                "_source": {
+                  "foo": "bar"
+                }
+              },
+              {
+                "_index": "index-1-a",
+                "_id": "id",
+                "_source": {
+                  "foo": "rab"
+                }
+              }
+            ],
+            "pipeline_substitutions": {
+              "my-pipeline": {
+                "processors": [
+                ]
+              }
+            }
+          }
+  - length: { docs: 2 }
+  - match: { docs.0.doc._index: "index-2-a" }
+  - match: { docs.0.doc._source.foo: "BAR" }
+  - match: { docs.0.doc._source.my-pipeline-1-ran: true }
+  - match: { docs.0.doc._source.my-final-pipeline-1-ran: null }
+  - match: { docs.0.doc._source.my-pipeline-2-ran: true }
+  - match: { docs.0.doc._source.my-final-pipeline-2-ran: true }
+  - match: { docs.0.doc.executed_pipelines: ["my-pipeline-1", "my-pipeline-2", "my-final-pipeline-2"] }
+  - match: { docs.1.doc._index: "index-2-a" }
+  - match: { docs.1.doc._source.foo: "RAB" }
+  - match: { docs.1.doc._source.my-pipeline-1-ran: true }
+  - match: { docs.1.doc._source.my-final-pipeline-1-ran: null }
+  - match: { docs.1.doc._source.my-pipeline-2-ran: true }
+  - match: { docs.1.doc._source.my-final-pipeline-2-ran: true }
+  - match: { docs.1.doc.executed_pipelines: ["my-pipeline-1", "my-pipeline-2", "my-final-pipeline-2"] }
+
+---
+"Test ingest simulate with errors":
+
+  - skip:
+      features: headers
+
+  - do:
+      headers:
+        Content-Type: application/json
+      ingest.put_pipeline:
+        id: "my-pipeline"
+        body: >
+          {
+            "processors": [
+              {
+                "uppercase": {
+                  "field": "field1"
+                }
+              }
+            ]
+          }
+  - match: { acknowledged: true }
+
+  - do:
+      indices.create:
+        index: index
+        body:
+          settings:
+            default_pipeline: "my-pipeline"
+
+  - do:
+      headers:
+        Content-Type: application/json
+      simulate.ingest:
+        body: >
+          {
+            "docs": [
+              {
+                "_index": "index",
+                "_source": {
+                  "field1": true
+                }
+              },
+              {
+                "_index": "index",
+                "_source": {
+                  "field1": "bar"
+                }
+              }
+            ]
+          }
+  - length: { docs: 2 }
+  - match: { docs.0.doc._index: "index" }
+  - match: { docs.0.doc.error.type: "illegal_argument_exception" }
+  - match: { docs.0.doc.executed_pipelines: null }
+  - match: { docs.1.doc._index: "index" }
+  - match: { docs.1.doc._source.field1: "BAR" }
+  - match: { docs.1.doc.executed_pipelines: ["my-pipeline"] }
diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
index f8b1de5155527..92905243fdb12 100644
--- a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
+++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
@@ -186,3 +186,18 @@ setup:
   - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max'
   - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average'
   - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev'
+
+---
+"Test unassigned_shards, total_allocations, undesired_allocations and undesired_allocations_fraction":
+
+  - skip:
+      version: " - 8.11.99"
+      reason: "undesired_shard_allocation_count added in 8.12.0"
+
+  - do:
+      _internal.get_desired_balance: { }
+
+  - gte: { 'stats.unassigned_shards' : 0 }
+  - gte: { 'stats.total_allocations' : 0 }
+  - gte: { 'stats.undesired_allocations' : 0 }
+  - gte: { 'stats.undesired_allocations_ratio' : 0.0 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json
new file mode 100644
index 0000000000000..0ab5c18671040
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json
@@ -0,0 +1,38 @@
+{
+  "connector.put": {
+    "documentation": {
+      "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html",
+      "description": "Creates or updates a connector."
+    },
+    "stability": "experimental",
+    "visibility": "public",
+    "headers": {
+      "accept": [
+        "application/json"
+      ],
+      "content_type": [
+        "application/json"
+      ]
+    },
+    "url": {
+      "paths": [
+        {
+          "path": "/_connector/{connector_id}",
+          "methods": [
+            "PUT"
+          ],
+          "parts": {
+            "connector_id": {
+              "type": "string",
+              "description": "The unique identifier of the connector to be created or updated."
+            }
+          }
+        }
+      ]
+    },
+    "body": {
+      "description": "The connector configuration.",
+      "required": true
+    }
+  }
+}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json
index c1f3079995de9..08134e211a312 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json
@@ -45,6 +45,10 @@
       "type":"boolean",
       "description":"Whether to update existing settings. If set to `true` existing settings on an index remain unchanged, the default is `false`"
     },
+    "reopen":{
+      "type":"boolean",
+      "description":"Whether to close and reopen the index to apply non-dynamic settings. If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes.
The default is `false`" + }, "ignore_unavailable":{ "type":"boolean", "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json index 127c6a5e86640..9426d6738c374 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json @@ -7,7 +7,8 @@ "stability":"experimental", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_model.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_model.json index 3a171640367de..26ba9ddb00608 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_model.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_model.json @@ -7,7 +7,8 @@ "stability":"experimental", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json b/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json index a25c3fee32571..bce8dfd794dca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json @@ -7,7 +7,8 @@ "stability":"stable", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ @@ -55,6 +56,9 @@ "description": "Specific the time to live for the point in time", "required": true } + }, + "body":{ + "description":"An index_filter specified with the Query DSL" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json b/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json new file mode 100644 index 0000000000000..91e7153d466da --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/simulate.ingest.json @@ -0,0 +1,48 @@ +{ + "simulate.ingest":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-ingest-api.html", + "description":"Simulates running ingest with example documents." 
+ }, + "stability":"experimental", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ingest/_simulate", + "methods":[ + "GET", + "POST" + ] + }, + { + "path":"/_ingest/{index}/_simulate", + "methods":[ + "GET", + "POST" + ], + "parts":{ + "index":{ + "type":"string", + "description":"Default index for docs which don't provide one" + } + } + } + ] + }, + "params":{ + "pipeline":{ + "type":"string", + "description":"The pipeline id to preprocess incoming documents with if no pipeline is given for a particular document" + } + }, + "body":{ + "description":"The simulate definition", + "required":true + } + } +} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml index 8e1d3431069cf..a4204034bfd80 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml @@ -221,3 +221,18 @@ setup: - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average' - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev' + +--- +"Test unassigned_shards, total_allocations, undesired_allocations and undesired_allocations_fraction": + + - skip: + version: " - 8.11.99" + reason: "undesired_shard_allocation_count added in 8.12.0" + + - do: + _internal.get_desired_balance: { } + + - gte: { 'stats.unassigned_shards' : 0 } + - gte: { 'stats.total_allocations' : 0 } + - gte: { 'stats.undesired_allocations' : 0 } + - gte: { 'stats.undesired_allocations_ratio' : 0.0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml new file mode 100644 index 0000000000000..07c0e8b7a8b2a --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml @@ -0,0 +1,58 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ability to update non-dynamic settings added in 8.12' + + - do: + indices.create: + index: test-index + body: + settings: + index: + number_of_replicas: 0 + +--- +"Test update non dynamic settings": + - do: + indices.put_settings: + index: test-index + body: + number_of_replicas: 1 + + - do: + catch: bad_request + indices.put_settings: + index: test-index + body: + index.codec: best_compression + + - do: + catch: bad_request + indices.put_settings: + index: test-index + reopen: false + body: + index.codec: best_compression + + - do: + indices.get_settings: + index: test-index + flat_settings: false + - match: + test-index.settings.index.codec: null + + - do: + indices.put_settings: + index: test-index + reopen: true + body: + index.codec: best_compression + - match: { acknowledged: true } + + - do: + indices.get_settings: + index: test-index + flat_settings: false + - match: + test-index.settings.index.codec: "best_compression" + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml
index c7477c5b538ab..6a347df112b47 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml @@ -2,43 +2,64 @@ "Basic mlt query with docs": - do: indices.create: - index: test_1 + index: mlt_test_index - do: index: - index: test_1 + index: mlt_test_index id: "1" body: { foo: bar } - do: index: - index: test_1 + index: mlt_test_index id: "2" body: { foo: baz } - do: index: - index: test_1 + index: mlt_test_index id: "3" body: { foo: foo } - do: indices.refresh: {} + - do: + get: + index: mlt_test_index + id: "1" + + - match: { _source.foo: "bar" } + + - do: + get: + index: mlt_test_index + id: "2" + + - match: { _source.foo: "baz" } + + - do: + get: + index: mlt_test_index + id: "3" + + - match: { _source.foo: "foo" } + - do: search: rest_total_hits_as_int: true - index: test_1 + index: mlt_test_index body: query: more_like_this: like: - - _index: test_1 + _index: mlt_test_index doc: foo: bar - - _index: test_1 + _index: mlt_test_index _id: "2" - _id: "3" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml new file mode 100644 index 0000000000000..a340de50bba0c --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml @@ -0,0 +1,75 @@ +--- +"Basic mlt query with docs - explicitly on same shard": + - do: + indices.create: + index: mlt_one_shard_test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 1 + + - do: + index: + index: mlt_one_shard_test_index + id: "1" + body: { foo: bar } + + - do: + index: + index: mlt_one_shard_test_index + id: "2" + body: { foo: baz } + + - do: + index: + index: mlt_one_shard_test_index + id: "3" + body: { foo: foo } + + - do: + indices.refresh: {} + + - do: + get: + index: mlt_one_shard_test_index + id: "1" + + - match: { _source.foo: "bar" } + + - do: + get: + index: mlt_one_shard_test_index + id: "2" + + - match: { _source.foo: "baz" } + + - do: + get: + index: mlt_one_shard_test_index + id: "3" + + - match: { _source.foo: "foo" } + + - do: + search: + rest_total_hits_as_int: true + index: mlt_one_shard_test_index + body: + query: + more_like_this: + like: + - + _index: mlt_one_shard_test_index + doc: + foo: bar + - + _index: mlt_one_shard_test_index + _id: "2" + - + _id: "3" + include: true + min_doc_freq: 0 + min_term_freq: 0 + + - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml index 151698482368a..545953d2645da 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml @@ -2,7 +2,37 @@ setup: - skip: version: ' - 8.10.99' reason: 'Dynamic mapping of floats to dense_vector was added in 8.11' +--- +"Fields indexed as strings won't be transformed into dense_vector": + - skip: + version: ' - 8.11.0' + reason: 'Bug fix was added in 8.11.1' + - do: + index: + index: strings-are-not-floats + refresh: true + body: + obviously_string: ["foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", 
"foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo", + "foo", "foo", "foo", "foo", "foo", "foo", "foo", "foo"] + - do: + cluster.health: + wait_for_events: languid + - do: + indices.get_mapping: + index: strings-are-not-floats + - match: { strings-are-not-floats.mappings.properties.obviously_string.type: text } --- "Fields with float arrays below the threshold still map as float": @@ -540,3 +570,104 @@ setup: - match: { test-copyto-index.mappings.properties.my_float2.type: float } - match: { test-copyto-index.mappings.properties.my_copyto_field.type: float } +--- +"Fields mapped as dense_vector without dims or docs have correct cluster stats values": + - skip: + version: ' - 8.11.1' + reason: 'Bug fix was added in 8.11.2' + + - do: + indices.create: + index: test-mapped-index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + my_dense_vector_field: + type: dense_vector + + - do: + cluster.health: + wait_for_events: languid + + - do: + cluster.stats: { } + + - match: { indices.mappings.field_types.0.name: dense_vector } + - match: { indices.mappings.field_types.0.count: 1 } + - match: { indices.mappings.field_types.0.indexed_vector_count: 1 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_min: -1 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_max: -1 } + +--- +"Fields mapped as dense_vector have correct cluster stats min max values": + - skip: + version: ' - 8.11.1' + reason: 'Bug fix was added in 8.11.2' + + - do: + index: + index: foo-mapped-index + id: "1" + refresh: true + body: + my_dense_vector_field: [ + 
233.8598,-17.6827,283.4271,-329.1247,-402.9721,404.7866,-358.7031,-267.4074,441.8363,320.2389,-128.0179,339.544,196.2018,-60.2688,336.0228,-440.1943,318.6882,-158.2596,277.0925,-487.4971,-338.9865,-275.716,136.8547,-253.6206,-40.2807,-357.0971,188.0344,-203.0674,449.9618,-223.2508,468.1441,302.4002,-65.0044,342.4431,205.6774,-118.636,-29.9706,183.9825,223.956,314.0691,137.0129,-8.0452,-15.131,-269.8643,-12.691,228.9777,-147.8384,-347.1117,-283.1905,459.2004,296.1321,-483.1799,414.3423,383.0187,-408.5525,-286.8169,482.5853,9.5232,-459.4968,-333.2521,109.0969,129.5107,43.4369,455.8283,-4.0423,-318.5019,339.1641,416.3581,-309.0429,84.2325,-355.8753,264.7671,43.8922,-298.6039,412.4413,19.4198,-251.279,-191.157,-478.2058,251.5709,-178.9633,479.293,188.399,380.9755,268.6575,120.3467,-322.0305,-255.4894,-377.515,56.9153,-133.9486,156.2546,-428.9581,-54.994,28.2146,158.7121,-426.7307,491.0086,-150.7205,-233.1005,244.5174,45.911,-406.1181,233.1636,175.9334,414.2805,421.7396,-322.8029,-252.2412,35.7622,318.5223,-141.5121,-375.4407,380.3081,222.1228,443.7844,367.377,-202.9594,-493.6231,-184.2242,-253.9838,463.1952,-416.3887,252.0867,-63.5317,411.0727,98.6261,330.7369,363.5685,-498.1848,-413.7246,-2.5996,-238.3547,-355.6041,-303.698,43.6266,383.1105,-72.3066,274.7491,321.9322,220.9543,-30.5578,400.0891,-181.7069,-386.4403,497.2206,-408.9611,138.485,-133.5666,-340.2569,-223.6313,270.884,-215.9399,74.3931,-244.1364,353.4219,-156.9905,488.3148,96.352,401.8525,-468.8344,129.9715,-27.1953,-168.631,187.7049,-336.5255,331.0652,204.3538,36.0182,366.8502,-468.6579,478.1409,-332.6136,-281.8499,63.7165,-458.8161,14.8894,-145.6397,267.1499,85.2025,326.3764,-419.6361,-133.9626,102.0618,443.3099,-207.9032,132.7032,234.001,-26.0754,105.6478,174.1252,-403.3511,-164.9714,-262.9344,-58.9668,357.6414,355.7508,-331.8443,153.5733,417.5712,260.7394,-150.1053,-435.6525,-364.1558,328.6183,-270.0863,107.1746,345.7998,480.8749,206.3896,-498.237,495.0835,481.9384,418.5571,-246.5213,-363.7304,311.7076,-53.1664,-297.3839,122.3105,-13.9226,-145.9754,-189.1748,460.9375,194.5417,-28.1346,-261.2177,-88.8396,-254.6407,-465.3148,-169.5377,24.3113,-116.2323,-420.3526,317.2107,-231.6227,-270.8239,387.8598,412.4251,428.1373,308.2044,275.2082,402.3663,-209.9843,-492.7269,225.1948,326.469,207.3557,-131.7677,371.9408,-139.3098,324.205,-126.6204,-335.0853,-248.2587,-344.907,307.2109,-441.3296,-318.027,414.6535,172.0537,-280.4991,331.0475,-158.0178,-285.1951,12.3632,149.9347,282.8302,-91.5624,-180.6097,496.0881,368.2567,357.6875,-194.2106,48.9213,-479.2956,-165.139,238.7811,302.7007,297.2805,208.7099,-5.5755,-85.7911,-358.1111,344.6131,415.7199,-219.1525,490.5003,-46.0096,498.2818,-91.8067,384.0104,396.1107,408.2827,-5.3919,-333.7992,-168.985,273.72,359.7125,227.7621,158.3406,-366.9722,3.7709,27.2728,71.9754,269.5792,-365.281,117.9152,-184.3682,356.9013,-142.6579,-496.7598,122.0194,89.1247,4.1914,-81.9905,465.0841,115.4727,169.6116,-199.9951,-223.3149,-447.3022,11.831,320.2368,105.1316,344.2462,8.6333,62.2285,-70.3944,-284.6694,-482.4229,-448.1569,-237.7858,222.3921,-172.1386,-312.5756,-390.0565,398.951,119.9784,-419.6537,121.3186,481.3011,-181.6662,-56.0219,424.1359,7.1461,138.8567,-307.0606,334.066,254.0897,473.7227,45.5936,133.7268,49.5334,-283.3406,179.4466,105.6191,-30.4162,271.5774,6.1156,110.4732,286.4325,13.3431,494.0139,-371.7624,283.3652,272.0558,-302.343,122.7245,-463.9261,299.9807,282.4502,-262.4911,183.4289,222.7474,-229.5973,141.6188,262.5468,278.1155,-331.0891,-393.6027,-230.1461,201.6657,-93.3604,-395.8877,-125.2013,-222.973,368
.3759,234.6628,-28.6809,-151.0703,432.0315,253.1214,430.7065,-143.6963,499.84,85.1683,280.4354,196.6013,139.0476,120.8148,-398.8155,-335.5504,229.0516,403.8604,-383.9868,-79.975,-152.77,220.4036,135.0355,238.2176,-242.3085,-177.0743,381.8202,411.167,378.0153,456.5976,364.013,24.2316,-395.4659,-210.2581,138.7539,479.7398,-291.7797,-123.0491,188.9817,42.8931,-354.4479,358.853,-43.6168,-190.6656,-103.3037,47.8915,-358.5402,374.9758,493.9951,-427.2376,-119.1142,-453.2975,-326.2696,-212.8273,-142.2931,-179.795,355.77,-156.2903,331.2006,451.9252,185.2944,-96.1941,173.0447,345.2744,43.0151,381.7845,-143.4125,84.654,-208.7053,-293.141,333.6349,-80.472,-376.9817,214.6298,-43.0931,-254.7834,-421.6961,-368.844,467.5544,-418.61,-66.6824,-350.2671,348.8241,252.3495,41.8677,-128.869,90.0391,-136.7405,-136.7822,489.8074,-396.8204,63.8355,323.9557,-83.6674,451.263,152.8955,-291.7497,410.0787,-299.7468,51.34,-298.6066,-58.853,325.911,-281.9541,-15.3457,299.1325,-347.4959,388.407,343.1096,28.1816,24.3013,-111.3312,190.5583,279.9848,-479.8894,123.2182,233.8425,-466.2128,-134.7122,217.8674,432.9523,-186.799,-477.2512,-223.5514,64.274,141.5251,-161.2187,150.2791,-228.1087,81.172,451.0879,-230.3818,-304.9398,402.1081,199.1266,275.3423,-123.9548,-21.1815,-384.544,446.9626,208.9692,-337.4827,-58.1011,344.2642,230.2868,44.9176,245.9885,-284.1875,-351.6104,108.1289,459.649,191.4334,53.591,136.7139,10.5912,-15.8411,62.8305,448.5256,194.7705,-356.3214,84.4996,-133.2502,-358.6308,262.7949,219.8741,-355.3985,468.2922,243.7227,-408.3166,188.6111,-221.7264,-286.8234,-340.3046,-224.5375,332.2615,73.2788,-24.7857,-485.2204,-136.7196,-162.9693,92.6017,-99.611,-186.5203,495.5483,240.8051,409.6493,-58.1321,-154.1239,-335.9719,-82.4408,-471.3057,-43.373,301.0884,-96.6359,-236.6906,435.7313,-227.7263,-406.8904,-392.3187,169.0043,-371.0852,-271.3652,-57.4466,-196.8455,52.741,361.7395,-117.8599,190.5339,276.6457,-321.9851,425.881,-473.2662,-74.2968,221.3612,-465.4429,181.723,-78.4508,21.6152,148.8107,-166.1687,-281.6391,-462.3636,-420.5255,-161.4143,98.8383,-374.5345,-366.2851,187.1506,-405.1865,239.4847,-246.8352,33.1748,-344.1211,477.9759,-294.1354,-359.5015,-44.8454,151.7072,-22.7324,-260.3293,99.1414,-20.5536,173.3766,-422.6692,458.3853,-199.7898,-236.3929,365.2599,-66.4191,388.3472,283.0336,-268.9463,269.5704,360.9679,-322.102,-407.0705,-93.0994,338.9108,-189.1359,-216.9102,-249.0153,122.6058,-254.8318,-112.2771,-279.0506,-168.4431,392.888,394.7607,468.0544,340.1852,-293.1288,-8.2912,-419.2608,323.3382,-93.8793,-242.0672,427.7716,-441.6906,128.3229,424.4679,-71.8586,134.5411,-74.5205,18.4141,17.7277,126.9123,-137.6119,33.3783,222.9912,-279.3582,89.1226,-90.031,12.7221,98.7767,-80.2372,-485.9212,-481.6575,-325.9729,318.8005,-433.786,-296.6337,421.6515,-27.2786,-445.2456,451.8876,-482.1014,-143.1098,186.1258,-90.2432,-297.7479,-351.0026,-423.7518,-219.6096,-269.2043,33.5767,-325.4335,392.4866,-418.243,112.5852,-248.1306,451.2154,-419.2995,154.5752,483.6323,-315.962,-196.872,406.1769,-356.9868,67.5251,-255.6475,103.5181,-450.4418,386.9518,456.4057,99.4591,-166.636,275.5374,200.4925,99.7623,292.6794,-422.3998,419.4837,-466.548,-462.8519,-381.4489,472.8356,-129.9563,441.4941,-376.1232,-114.1945,233.5531,313.6963,394.9503,-278.7558,350.7515,47.9427,220.7074,-178.9789,-346.0485,-128.5665,8.9461,159.9838,-57.3637,351.9478,-65.9411,-258.1788,498.9494,-472.613,-428.5678,17.3981,-435.3682,-421.155,-54.9177,-490.2348,178.3777,-31.9618,-242.1805,362.3736,380.8179,446.4272,-23.9142,61.3588,-489.5704,363.6446,-186.1519,-351.8684,-322.2791,-
226.0431,404.6996,203.9824,306.0958,234.0145,-180.4996,452.0633,257.171,-83.6197,-393.152,396.6934,32.156,-428.7645,183.7886,494.767,68.3905,278.9785,-40.4759,261.7298,236.5778,4.5577,-130.9582,433.2837,-298.1139,-107.9822,-196.8446,-121.1765,-292.5509,-246.4546,-258.6038,280.1334,-52.6511,483.2928,-185.7577,-75.3705,351.3411,179.1282,-479.3838,166.2733,-197.9043,282.6848,-50.4744,-492.7178,183.6435,-127.2379,483.646,433.0805,-228.5488,139.8314,-145.1337,-403.1749,306.2704,122.7149,479.6928,85.3866,108.095,-224.152,494.6848,-368.4504,-180.7579,61.7136,51.2045,-383.0103,-376.4816,-292.8217,-201.118,332.1516,425.2758,138.1284,-229.4302,432.9081,2.9898,-437.7631,-448.2151,129.9126,-170.2405,499.0396,-48.2137,363.8046,-423.2511,-28.0804,-267.826,-356.6288,-99.9371,-409.8465,170.4902,-269.2584,-277.4098,300.8819,-142.5889,339.0952,16.2275,-310.8646,201.0733,-495.5905,341.9279,-149.1184,-494.4928,-81.7343,209.9762,273.4892,380.3163,359.2424,-242.5,-42.1268,-303.9792,11.6018,361.5483,416.4178,10.3282,195.9796,148.8096,-60.9724,-205.5221,-145.4574,-341.5913,426.8996,-19.5843,60.6265,-133.4191,-139.8737,281.7465,461.2854,-270.8902,61.0182,-58.6791,-254.0193,-234.1206,-208.7334,39.7498,-14.337,-68.2319,-342.2756,403.6834,401.6122,-166.1637,47.3592,-325.7,274.5459,343.4873,328.3783,-370.1657,-122.8967,-231.3182,122.6609,119.2685,-223.5437,-210.8076,116.5022,340.2814,256.1852,-217.3487,-150.9598,331.1343,-453.8182,-448.0842,-95.2475,-340.9942,-416.7835,-96.7226,-328.7212,-373.4337,472.2214,-484.522,-465.1583,330.0712,73.2052,-55.1266,-352.8984,341.0742,-230.4845,321.0752,236.2116,35.1902,75.3489,-469.4042,110.2036,35.1156,454.7224,103.0685,-221.7499,-23.6898,-259.2362,-110.509,-261.0039,219.2391,-139.9404,155.7723,377.9713,434.0318,-365.1397,459.1471,-318.5774,323.4256,194.325,-311.9529,-153.9019,-346.5811,76.4069,443.2121,-199.407,495.6636,-138.5213,-145.3432,-151.7758,-365.3547,263.6507,-491.1686,-183.5585,-12.6044,318.5346,-443.8639,-179.0338,477.9093,-355.5118,-423.0035,-229.1166,-96.7782,-479.2384,192.9085,223.3407,-302.9472,297.3847,477.584,-297.5958,168.6023,-80.6912,-89.8717,87.1476,-129.7807,346.5576,-253.9729,-399.6858,-389.5785,35.1648,-180.451,-49.6084,83.9582,-185.2329,97.283,195.5249,-91.6969,199.202,-449.792,333.4825,-113.7558,443.434,394.3587,-94.9074,71.2092,-251.1774,-85.047,-46.4004,20.2595,341.1073,-91.2527,86.3775,303.1247,-336.9011,343.9894,-384.1261,154.4411,-465.2493,-63.3249,488.0231,348.6725,458.2093,322.401,220.2532,283.3734,-386.4252,-256.5262,-87.2205,96.8199,47.6908,-399.6307,214.7716,-19.9177,-458.513,-194.3218,-320.5342,-275.857,-301.6955,-84.9038,358.3475,-88.9271,499.7721,-161.7403,355.4894,313.6211,-176.1703,61.8427,107.603,-176.063,-426.5408,292.3612,58.3331,-115.8853,471.4131,-76.4815,-309.6263,361.4518,192.4763,-145.7968,256.3888,133.335,-474.0901,-366.9793,-495.223,457.2366,170.056,285.0152,89.8213,225.2251,354.1822,-298.374,-332.9164,-55.2409,306.9283,25.9392,218.0624,7.5085,-151.8768,-155.4932,6.0001,201.4506,-259.9874,485.1078,-362.8516,-230.1434,-398.2512,243.0012,32.302,-197.91,144.1195,-89.4196,-44.0399,-371.7866,227.6007,492.7526,499.3824,162.2475,279.0325,177.0781,341.0137,199.6009,108.1678,312.2319,-211.5001,-92.675,357.0513,-337.924,-348.984,-350.3677,173.3473,-193.7346,-318.5609,-2.0928,46.6287,-346.8513,36.634,-277.4949,-149.325,481.1378,370.3864,-139.6689,-332.2805,48.0292,109.8363,494.6994,373.6992,495.7442,400.4998,-26.2276,-308.7669,188.9497,257.9182,-116.6944,269.8932,197.005,123.1139,-356.2058,485.1982,-4.0119,397.8434,-204.67,-494.5133,-414.1299
,142.1512,-36.5446,390.0718,6.9876,263.1216,457.5598,89.6086,-266.3804,17.3457,88.8182,236.6271,81.175,-170.2249,-5.7664,422.7852,180.3349,-135.2642,149.2285,-70.6607,-46.169,-389.3313,230.6125,388.4853,-438.3426,111.8034,300.0416,37.5604,-437.3868,-114.1336,312.7777,-99.1161,-312.9015,-147.3787,-434.0536,19.5034,141.706,-281.4504,-208.9608,281.4619,-361.0596,-464.2757,77.8205,232.5575,165.4104,424.8738,124.5555,342.038,86.7543,278.0216,311.2686,337.834,-90.0545,-210.1143,-488.4095,-80.7535,92.3731,-122.622,-288.0571,1.7285,-5.2998,100.0717,-395.0571,-477.5587,-160.5642,-119.4214,-232.233,415.7276,-204.3216,-436.7766,-103.4644,-427.0939,-31.0927,-440.2919,120.5971,-223.3623,-199.0988,304.8697,432.5731,-231.5791,-397.696,306.4134,330.1018,32.4345,-175.719,464.6091,-291.5686,300.1631,-167.4592,238.9574,104.5893,-187.2215,-294.0111,-361.9094,480.6847,-304.2133,-448.7144,67.7235,-255.9669,254.7379,464.5465,6.8909,-368.7554,337.5993,39.1928,-376.0625,433.4224,-109.1488,341.7731,377.843,446.839,-192.283,251.1592,437.6812,-478.3409 + ] + - do: + cluster.health: + wait_for_events: languid + + - do: + indices.get_mapping: + index: foo-mapped-index + + # sanity + - match: { foo-mapped-index.mappings.properties.my_dense_vector_field.type: dense_vector } + - match: { foo-mapped-index.mappings.properties.my_dense_vector_field.index: true } + - match: { foo-mapped-index.mappings.properties.my_dense_vector_field.similarity: cosine } + - match: { foo-mapped-index.mappings.properties.my_dense_vector_field.dims: 1276 } + + - do: + cluster.stats: { } + + - match: { indices.mappings.field_types.0.name: dense_vector } + - match: { indices.mappings.field_types.0.count: 1 } + - match: { indices.mappings.field_types.0.indexed_vector_count: 1 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_min: 1276 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_max: 1276 } + + - do: + index: + index: bar-mapped-index + id: "1" + refresh: true + body: + my_dense_vector_field: [ + 325.9729,318.8005,-433.786,-296.6337,421.6515,-27.2786,-445.2456,451.8876,-482.1014,-143.1098,186.1258,-90.2432,-297.7479,-351.0026,-423.7518,-219.6096,-269.2043,33.5767,-325.4335,392.4866,-418.243,112.5852,-248.1306,451.2154,-419.2995,154.5752,483.6323,-315.962,-196.872,406.1769,-356.9868,67.5251,-255.6475,103.5181,-450.4418,386.9518,456.4057,99.4591,-166.636,275.5374,200.4925,99.7623,292.6794,-422.3998,419.4837,-466.548,-462.8519,-381.4489,472.8356,-129.9563,441.4941,-376.1232,-114.1945,233.5531,313.6963,394.9503,-278.7558,350.7515,47.9427,220.7074,-178.9789,-346.0485,-128.5665,8.9461,159.9838,-57.3637,351.9478,-65.9411,-258.1788,498.9494,-472.613,-428.5678,17.3981,-435.3682,-421.155,-54.9177,-490.2348,178.3777,-31.9618,-242.1805,362.3736,380.8179,446.4272,-23.9142,61.3588,-489.5704,363.6446,-186.1519,-351.8684,-322.2791,-226.0431,404.6996,203.9824,306.0958,234.0145,-180.4996,452.0633,257.171,-83.6197,-393.152,396.6934,32.156,-428.7645,183.7886,494.767,68.3905,278.9785,-40.4759,261.7298,236.5778,4.5577,-130.9582,433.2837,-298.1139,-107.9822,-196.8446,-121.1765,-292.5509,-246.4546,-258.6038,280.1334,-52.6511,483.2928,-185.7577,-75.3705,351.3411,179.1282,-479.3838,166.2733,-197.9043,282.6848,-50.4744,-492.7178,183.6435,-127.2379,483.646,433.0805,-228.5488,139.8314,-145.1337,-403.1749,306.2704,122.7149,479.6928,85.3866,108.095,-224.152,494.6848,-368.4504,-180.7579,61.7136,51.2045,-383.0103,-376.4816,-292.8217,-399.6307,214.7716,-19.9177,-458.513,-194.3218,-320.5342,-275.857,-301.6955,-66.756 + ] + - do: + cluster.health: 
+ wait_for_events: languid + + - do: + indices.get_mapping: + index: bar-mapped-index + + # sanity + - match: { bar-mapped-index.mappings.properties.my_dense_vector_field.type: dense_vector } + - match: { bar-mapped-index.mappings.properties.my_dense_vector_field.index: true } + - match: { bar-mapped-index.mappings.properties.my_dense_vector_field.similarity: cosine } + - match: { bar-mapped-index.mappings.properties.my_dense_vector_field.dims: 164 } + + - do: + cluster.stats: { } + + - match: { indices.mappings.field_types.0.name: dense_vector } + - match: { indices.mappings.field_types.0.count: 2 } + - match: { indices.mappings.field_types.0.indexed_vector_count: 2 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_min: 164 } + - match: { indices.mappings.field_types.0.indexed_vector_dim_max: 1276 } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml index bc3479b705180..7e78450931df5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml @@ -6,19 +6,19 @@ setup: index: index: test id: "1" - body: { id: 1, foo: bar, age: 18 } + body: { id: 1, foo: bar, age: 18, birth: "2022-01-01" } - do: index: index: test id: "42" - body: { id: 42, foo: bar, age: 18 } + body: { id: 42, foo: bar, age: 18, birth: "2022-02-01" } - do: index: index: test id: "172" - body: { id: 172, foo: bar, age: 24 } + body: { id: 172, foo: bar, age: 24, birth: "2022-03-01" } - do: indices.create: @@ -28,7 +28,7 @@ setup: index: index: test2 id: "45" - body: { id: 45, foo: bar, age: 19 } + body: { id: 45, foo: bar, age: 19, birth: "2023-01-01" } - do: indices.refresh: @@ -235,3 +235,32 @@ setup: close_point_in_time: body: id: "$point_in_time_id" + +--- +"point-in-time with index filter": + - skip: + version: " - 8.11.99" + reason: "support for index filter was added in 8.12" + - do: + open_point_in_time: + index: test* + keep_alive: 5m + body: { index_filter: { range: { birth: { gte: "2023-01-01" }}}} + - set: {id: point_in_time_id} + + - do: + search: + body: + size: 1 + pit: + id: "$point_in_time_id" + + - match: {hits.total.value: 1 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._index: test2 } + - match: {hits.hits.0._id: "45" } + + - do: + close_point_in_time: + body: + id: "$point_in_time_id" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml new file mode 100644 index 0000000000000..38aaaa9847efb --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml @@ -0,0 +1,344 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ingest simulate added in 8.12' + +--- +"Test no pipelines": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-2", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: [] } + - match: { docs.1.doc._index: "index-2" } + - match: { docs.1.doc._source.foo: "rab" } + 
- match: { docs.1.doc.executed_pipelines: [] } + +--- +"Test existing index with pipelines": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.create: + index: index + body: + settings: + default_pipeline: "my-pipeline" + final_pipeline: "my-final-pipeline" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + +--- +"Test index templates with pipelines": + + - skip: + features: headers + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102339" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-final-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.put_index_template: + name: my-template + body: + index_patterns: index-* + template: + settings: + default_pipeline: "my-pipeline" + final_pipeline: "my-final-pipeline" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + ] + } + } + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + - match: { docs.1.doc._index: "index-1" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline", "my-final-pipeline"] } + +--- +"Test bad pipeline substitution": + + - skip: + features: headers + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102339" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + indices.put_index_template: + name: my-template + body: + index_patterns: index-* + template: + settings: + default_pipeline: "my-pipeline" + + - do: + catch: "request" + headers: + Content-Type: application/json + simulate.ingest: + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + { + "non-existent-processor": { + } + } + ] + } + } 
+ } + - match: { status: 500 } + +--- +"Test index in path": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: "test-index" + body: > + { + "docs": [ + { + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "test-index" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: [] } + - match: { docs.1.doc._index: "test-index" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: [] } + +--- +"Test pipeline in query param": + + - skip: + features: headers + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "my-pipeline" + body: > + { + "processors": [ + ] + } + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + simulate.ingest: + pipeline: "my-pipeline" + body: > + { + "docs": [ + { + "_index": "index-1", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index-2", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.doc._index: "index-1" } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc.executed_pipelines: ["my-pipeline"] } + - match: { docs.1.doc._index: "index-2" } + - match: { docs.1.doc._source.foo: "rab" } + - match: { docs.1.doc.executed_pipelines: ["my-pipeline"] } diff --git a/server/build.gradle b/server/build.gradle index 0e154d2287b56..01879e232634b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -141,9 +141,11 @@ sourceSets.main.compiledBy(generateModulesList, generatePluginsList) if (BuildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.failure_store_feature_flag_enabled', 'true' } tasks.named("internalClusterTest").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' + systemProperty 'es.failure_store_feature_flag_enabled', 'true' } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index 271984fd1ae5e..e01241da4db91 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -45,8 +45,8 @@ import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.ExplainRequest; +import org.elasticsearch.action.explain.TransportExplainAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.GetAction; @@ -55,7 +55,6 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.replication.TransportReplicationActionTests; @@ -102,7 +101,7 @@ import java.util.function.Function; 
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -234,7 +233,7 @@ public void testUpdate() { interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); - client().prepareIndex(indexOrAlias).setId("id").setSource("field", "value").get(); + prepareIndex(indexOrAlias).setId("id").setSource("field", "value").get(); UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "id").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); UpdateResponse updateResponse = internalCluster().coordOnlyNodeClient().update(updateRequest).actionGet(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); @@ -264,7 +263,7 @@ public void testUpdateDelete() { interceptTransportActions(updateShardActions); String indexOrAlias = randomIndexOrAlias(); - client().prepareIndex(indexOrAlias).setId("id").setSource("field", "value").get(); + prepareIndex(indexOrAlias).setId("id").setSource("field", "value").get(); UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "id").script( new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op='delete'", Collections.emptyMap()) ); @@ -318,7 +317,7 @@ public void testGet() { } public void testExplain() { - String explainShardAction = ExplainAction.NAME + "[s]"; + String explainShardAction = TransportExplainAction.TYPE.name() + "[s]"; interceptTransportActions(explainShardAction); ExplainRequest explainRequest = new ExplainRequest(randomIndexOrAlias(), "id").query(QueryBuilders.matchAllQuery()); @@ -555,14 +554,15 @@ public void testSearchQueryThenFetch() throws Exception { String[] randomIndicesOrAliases = randomIndicesOrAliases(); for (int i = 0; i < randomIndicesOrAliases.length; i++) { - client().prepareIndex(randomIndicesOrAliases[i]).setId("id-" + i).setSource("field", "value").get(); + prepareIndex(randomIndicesOrAliases[i]).setId("id-" + i).setSource("field", "value").get(); } refresh(); SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.QUERY_THEN_FETCH); - SearchResponse searchResponse = internalCluster().coordOnlyNodeClient().search(searchRequest).actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertNoFailuresAndResponse( + internalCluster().coordOnlyNodeClient().search(searchRequest), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)) + ); clearInterceptedActions(); assertIndicesSubset( @@ -584,14 +584,15 @@ public void testSearchDfsQueryThenFetch() throws Exception { String[] randomIndicesOrAliases = randomIndicesOrAliases(); for (int i = 0; i < randomIndicesOrAliases.length; i++) { - client().prepareIndex(randomIndicesOrAliases[i]).setId("id-" + i).setSource("field", "value").get(); + prepareIndex(randomIndicesOrAliases[i]).setId("id-" + i).setSource("field", "value").get(); } refresh(); SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.DFS_QUERY_THEN_FETCH); - SearchResponse searchResponse = internalCluster().coordOnlyNodeClient().search(searchRequest).actionGet(); - assertNoFailures(searchResponse); - 
assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertNoFailuresAndResponse( + internalCluster().coordOnlyNodeClient().search(searchRequest), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)) + ); clearInterceptedActions(); assertIndicesSubset( @@ -608,10 +609,6 @@ private static void assertSameIndices(IndicesRequest originalRequest, String... assertSameIndices(originalRequest, false, actions); } - private static void assertSameIndicesOptionalRequests(IndicesRequest originalRequest, String... actions) { - assertSameIndices(originalRequest, true, actions); - } - private static void assertSameIndices(IndicesRequest originalRequest, boolean optional, String... actions) { for (String action : actions) { List requests = consumeTransportRequests(action); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java index c7082f7979ed9..a30d654900c20 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java @@ -43,7 +43,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testSimulatedSearchRejectionLoad() throws Throwable { for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "1").get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "1").get(); } int numberOfAsyncOps = randomIntBetween(200, 700); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 6a3a7ccfe221a..05e3b81c3683f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -100,9 +100,9 @@ public void onFailure(Exception e) { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "value1"), - client().prepareIndex("test").setId("2").setSource("field1", "value2"), - client().prepareIndex("test").setId("3").setSource("field1", "value3") + prepareIndex("test").setId("1").setSource("field1", "value1"), + prepareIndex("test").setId("2").setSource("field1", "value2"), + prepareIndex("test").setId("3").setSource("field1", "value3") ); ensureSearchable(); while (latch.getCount() > 0) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index bbd1ea67b7ef8..07c6ba4945eaa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -225,7 +225,6 @@ public void testCancelTaskMultipleTimes() throws Exception { assertFalse(cancelFuture.isDone()); allowEntireRequest(rootRequest); assertThat(cancelFuture.actionGet().getTaskFailures(), empty()); - assertThat(cancelFuture.actionGet().getTaskFailures(), empty()); waitForRootTask(mainTaskFuture, false); CancelTasksResponse cancelError = clusterAdmin().prepareCancelTasks() .setTargetTaskId(taskId) diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 46737571a15ab..502c60b4a3402 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -26,9 +26,8 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationActionTests; @@ -83,6 +82,7 @@ import static org.elasticsearch.core.TimeValue.timeValueSeconds; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -304,7 +304,7 @@ public void testTransportBulkTasks() { ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks // ensures the mapping is available on all nodes so we won't retry the request (in case replicas don't have the right mapping). 
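// (a retried shard request would show up as an extra transport task, which could skew the per-action task counts asserted below)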
indicesAdmin().preparePutMapping("test").setSource("foo", "type=keyword").get(); - client().prepareBulk().add(client().prepareIndex("test").setId("test_id").setSource("{\"foo\": \"bar\"}", XContentType.JSON)).get(); + client().prepareBulk().add(prepareIndex("test").setId("test_id").setSource("{\"foo\": \"bar\"}", XContentType.JSON)).get(); // the bulk operation should produce one main task List topTask = findEvents(BulkAction.NAME, Tuple::v1); @@ -349,12 +349,11 @@ public void testTransportBulkTasks() { } public void testSearchTaskDescriptions() { - registerTaskManagerListeners(SearchAction.NAME); // main task - registerTaskManagerListeners(SearchAction.NAME + "[*]"); // shard task + registerTaskManagerListeners(TransportSearchAction.TYPE.name()); // main task + registerTaskManagerListeners(TransportSearchAction.TYPE.name() + "[*]"); // shard task createIndex("test"); ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks - client().prepareIndex("test") - .setId("test_id") + prepareIndex("test").setId("test_id") .setSource("{\"foo\": \"bar\"}", XContentType.JSON) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); @@ -366,24 +365,23 @@ public void testSearchTaskDescriptions() { assertNoFailures(client().filterWithHeader(headers).prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())); // the search operation should produce one main task - List mainTask = findEvents(SearchAction.NAME, Tuple::v1); + List mainTask = findEvents(TransportSearchAction.TYPE.name(), Tuple::v1); assertEquals(1, mainTask.size()); assertThat(mainTask.get(0).description(), startsWith("indices[test], search_type[")); assertThat(mainTask.get(0).description(), containsString("\"query\":{\"match_all\"")); assertTaskHeaders(mainTask.get(0)); // check that if we have any shard-level requests they all have non-zero length description - List shardTasks = findEvents(SearchAction.NAME + "[*]", Tuple::v1); + List shardTasks = findEvents(TransportSearchAction.TYPE.name() + "[*]", Tuple::v1); for (TaskInfo taskInfo : shardTasks) { assertThat(taskInfo.parentTaskId(), notNullValue()); assertEquals(mainTask.get(0).taskId(), taskInfo.parentTaskId()); assertTaskHeaders(taskInfo); switch (taskInfo.action()) { - case SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.QUERY_CAN_MATCH_NAME, - SearchTransportService.DFS_ACTION_NAME -> assertTrue( - taskInfo.description(), - Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) - ); + case SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.DFS_ACTION_NAME -> assertTrue( + taskInfo.description(), + Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) + ); case SearchTransportService.QUERY_ID_ACTION_NAME -> assertTrue( taskInfo.description(), Regex.simpleMatch("id[*], indices[test]", taskInfo.description()) @@ -449,7 +447,7 @@ public void onTaskRegistered(Task task) { } // Need to run the task in a separate thread because node client's .execute() is blocked by our task listener index = new Thread(() -> { - DocWriteResponse indexResponse = client().prepareIndex("test").setSource("test", "test").get(); + DocWriteResponse indexResponse = prepareIndex("test").setSource("test", "test").get(); assertArrayEquals(ReplicationResponse.NO_FAILURES, indexResponse.getShardInfo().getFailures()); }); index.start(); @@ -771,17 +769,19 @@ public void testTaskStoringSuccessfulResult() throws Exception { assertNoFailures(indicesAdmin().prepareRefresh(TaskResultsService.TASK_INDEX).get()); - SearchResponse 
searchResponse = prepareSearch(TaskResultsService.TASK_INDEX).setSource( - SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.action())) - ).get(); - - assertEquals(1L, searchResponse.getHits().getTotalHits().value); - - searchResponse = prepareSearch(TaskResultsService.TASK_INDEX).setSource( - SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.taskId().getNodeId())) - ).get(); + assertHitCount( + prepareSearch(TaskResultsService.TASK_INDEX).setSource( + SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.action())) + ), + 1L + ); - assertEquals(1L, searchResponse.getHits().getTotalHits().value); + assertHitCount( + prepareSearch(TaskResultsService.TASK_INDEX).setSource( + SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.taskId().getNodeId())) + ), + 1L + ); GetTaskResponse getResponse = expectFinishedTask(taskId); assertEquals(result, getResponse.getTask().getResponseAsMap()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index ff43db96a0057..3aee1fdf505fe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -62,7 +62,7 @@ public void testVerifyRepositoryWithBlocks() { // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - VerifyRepositoryResponse response = clusterAdmin().prepareVerifyRepository("test-repo-blocks").execute().actionGet(); + VerifyRepositoryResponse response = clusterAdmin().prepareVerifyRepository("test-repo-blocks").get(); assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); } finally { setClusterReadOnly(false); @@ -100,7 +100,7 @@ public void testGetRepositoryWithBlocks() { // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. 
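// The read-only block is set inside the try and always cleared in the finally clause, so a failing assertion cannot leave the block in place for later tests.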
try { setClusterReadOnly(true); - GetRepositoriesResponse response = clusterAdmin().prepareGetRepositories("test-repo-blocks").execute().actionGet(); + GetRepositoriesResponse response = clusterAdmin().prepareGetRepositories("test-repo-blocks").get(); assertThat(response.repositories(), hasSize(1)); } finally { setClusterReadOnly(false); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index d3dbccbb6d6e5..b6b0b2e54e691 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -46,11 +46,11 @@ protected void setUpRepository() throws Exception { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex(INDEX_NAME).setSource("test", "init").execute().actionGet(); + prepareIndex(INDEX_NAME).setSource("test", "init").get(); } docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex(OTHER_INDEX_NAME).setSource("test", "init").execute().actionGet(); + prepareIndex(OTHER_INDEX_NAME).setSource("test", "init").get(); } logger.info("--> register a repository"); @@ -69,8 +69,7 @@ protected void setUpRepository() throws Exception { CreateSnapshotResponse snapshotResponse = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) .setIncludeGlobalState(true) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(snapshotResponse.status(), equalTo(RestStatus.OK)); ensureSearchable(); } @@ -90,8 +89,7 @@ public void testCreateSnapshotWithBlocks() { logger.info("--> creating a snapshot is allowed when the cluster is not read only"); CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(response.status(), equalTo(RestStatus.OK)); } @@ -153,8 +151,7 @@ public void testRestoreSnapshotWithBlocks() { logger.info("--> creating a snapshot is allowed when the cluster is not read only"); RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(response.status(), equalTo(RestStatus.OK)); assertTrue(indexExists(INDEX_NAME)); assertTrue(indexExists(OTHER_INDEX_NAME)); @@ -164,7 +161,7 @@ public void testGetSnapshotWithBlocks() { // This test checks that the Get Snapshot operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(REPOSITORY_NAME).execute().actionGet(); + GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(REPOSITORY_NAME).get(); assertThat(response.getSnapshots(), hasSize(1)); assertThat(response.getSnapshots().get(0).snapshotId().getName(), equalTo(SNAPSHOT_NAME)); } finally { @@ -176,10 +173,7 @@ public void testSnapshotStatusWithBlocks() { // This test checks that the Snapshot Status operation is never blocked, even if the cluster is read only. 
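// The snapshot created in setUpRepository() ran with waitForCompletion, so its status should report completed even while the cluster is read only.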
try { setClusterReadOnly(true); - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(REPOSITORY_NAME) - .setSnapshots(SNAPSHOT_NAME) - .execute() - .actionGet(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(REPOSITORY_NAME).setSnapshots(SNAPSHOT_NAME).get(); assertThat(response.getSnapshots(), hasSize(1)); assertThat(response.getSnapshots().get(0).getState().completed(), equalTo(true)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java index 3ace029d57521..4d37f75894d56 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java @@ -37,8 +37,7 @@ public void testClearIndicesCacheWithBlocks() { .setFieldDataCache(true) .setQueryCache(true) .setFieldDataCache(true) - .execute() - .actionGet(); + .get(); assertNoFailures(clearIndicesCacheResponse); assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java index e5edeccbad55d..c0d62ba54621a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java @@ -116,7 +116,7 @@ public void testWriteToAliasPrimaryAutoCreatedFirst() throws Exception { client().execute(AutoCreateAction.INSTANCE, request).get(); } - DocWriteResponse response = client().prepareIndex(INDEX_NAME).setSource("{\"foo\":\"bar\"}", XContentType.JSON).get(); + DocWriteResponse response = prepareIndex(INDEX_NAME).setSource("{\"foo\":\"bar\"}", XContentType.JSON).get(); assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); } @@ -135,7 +135,7 @@ public void testWriteToAliasSecondaryAutoCreatedFirst() throws Exception { client().execute(AutoCreateAction.INSTANCE, request).get(); } - DocWriteResponse response = client().prepareIndex(INDEX_NAME).setSource("{\"foo\":\"bar\"}", XContentType.JSON).get(); + DocWriteResponse response = prepareIndex(INDEX_NAME).setSource("{\"foo\":\"bar\"}", XContentType.JSON).get(); assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); } @@ -205,18 +205,20 @@ public void testAutoCreateSystemAliasViaV1TemplateAllowsTemplates() throws Excep } private String autoCreateSystemAliasViaComposableTemplate(String indexName) throws Exception { - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList(indexName + "*"), - new Template( - null, - null, - Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build()) - ), - Collections.emptyList(), - 4L, - 5L, - Collections.emptyMap() - ); + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList(indexName + "*")) + .template( + new Template( + null, + null, + Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + 
"-composable-alias").build()) + ) + ) + .componentTemplates(Collections.emptyList()) + .priority(4L) + .version(5L) + .metadata(Collections.emptyMap()) + .build(); assertAcked( client().execute( PutComposableIndexTemplateAction.INSTANCE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java index 93d12c686297f..d006192579ead 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -39,7 +39,7 @@ public void testCreateCloneIndex() { ).get(); final int docs = randomIntBetween(0, 128); for (int i = 0; i < docs; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } internalCluster().ensureAtLeastNumDataNodes(2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node @@ -89,7 +89,7 @@ public void testCreateCloneIndex() { } for (int i = docs; i < 2 * docs; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } flushAndRefresh(); assertHitCount(prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")), 2 * docs); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 4b395ec6856e5..b4d0286b74077 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -206,7 +206,6 @@ public void testInvalidShardCountSettingsWithoutPrefix() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96578") public void testCreateAndDeleteIndexConcurrently() throws InterruptedException { createIndex("test"); final AtomicInteger indexVersion = new AtomicInteger(0); @@ -214,7 +213,7 @@ public void testCreateAndDeleteIndexConcurrently() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setSource("index_version", indexVersion.get()).get(); + prepareIndex("test").setSource("index_version", indexVersion.get()).get(); } synchronized (indexVersionLock) { // not necessarily needed here but for completeness we lock here too indexVersion.incrementAndGet(); @@ -227,7 +226,7 @@ public void onResponse(AcknowledgedResponse deleteIndexResponse) { public void run() { try { // recreate that index - client().prepareIndex("test").setSource("index_version", indexVersion.get()).get(); + prepareIndex("test").setSource("index_version", indexVersion.get()).get(); synchronized (indexVersionLock) { // we sync here since we have to ensure that all indexing operations below for a given ID are done before // we increment the index version otherwise a doc that is in-flight could make it into an index 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java
index 93d12c686297f..d006192579ead 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java
@@ -39,7 +39,7 @@ public void testCreateCloneIndex() {
        ).get();
        final int docs = randomIntBetween(0, 128);
        for (int i = 0; i < docs; i++) {
-            client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        internalCluster().ensureAtLeastNumDataNodes(2);
        // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
@@ -89,7 +89,7 @@ public void testCreateCloneIndex() {
        }
        for (int i = docs; i < 2 * docs; i++) {
-            client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        flushAndRefresh();
        assertHitCount(prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")), 2 * docs);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
index 4b395ec6856e5..b4d0286b74077 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
@@ -206,7 +206,6 @@ public void testInvalidShardCountSettingsWithoutPrefix() throws Exception {
        }
    }
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96578")
    public void testCreateAndDeleteIndexConcurrently() throws InterruptedException {
        createIndex("test");
        final AtomicInteger indexVersion = new AtomicInteger(0);
@@ -214,7 +213,7 @@ public void testCreateAndDeleteIndexConcurrently() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        int numDocs = randomIntBetween(1, 10);
        for (int i = 0; i < numDocs; i++) {
-            client().prepareIndex("test").setSource("index_version", indexVersion.get()).get();
+            prepareIndex("test").setSource("index_version", indexVersion.get()).get();
        }
        synchronized (indexVersionLock) { // not necessarily needed here but for completeness we lock here too
            indexVersion.incrementAndGet();
@@ -227,7 +226,7 @@ public void onResponse(AcknowledgedResponse deleteIndexResponse) {
                public void run() {
                    try {
                        // recreate that index
-                        client().prepareIndex("test").setSource("index_version", indexVersion.get()).get();
+                        prepareIndex("test").setSource("index_version", indexVersion.get()).get();
                        synchronized (indexVersionLock) {
                            // we sync here since we have to ensure that all indexing operations below for a given ID are done before
                            // we increment the index version otherwise a doc that is in-flight could make it into an index that it
@@ -253,10 +252,7 @@ public void onFailure(Exception e) {
            for (int i = 0; i < numDocs; i++) {
                try {
                    synchronized (indexVersionLock) {
-                        client().prepareIndex("test")
-                            .setSource("index_version", indexVersion.get())
-                            .setTimeout(TimeValue.timeValueSeconds(10))
-                            .get();
+                        prepareIndex("test").setSource("index_version", indexVersion.get()).setTimeout(TimeValue.timeValueSeconds(10)).get();
                    }
                } catch (IndexNotFoundException inf) {
                    // fine
@@ -339,7 +335,7 @@ public void testInvalidPartitionSize() {
                    .put("index.number_of_shards", shards)
                    .put("index.number_of_routing_shards", shards)
                    .put("index.routing_partition_size", partitionSize)
-            ).execute().actionGet();
+            ).get();
        } catch (IllegalStateException | IllegalArgumentException e) {
            return false;
        }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
index a0dffa8b7caa8..1c075442d99e6 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
@@ -194,18 +194,20 @@ public void testCreateSystemAliasViaV1TemplateAllowsTemplates() throws Exception
    }
    private void createIndexWithComposableTemplates(String indexName, String primaryIndexName) throws Exception {
-        ComposableIndexTemplate cit = new ComposableIndexTemplate(
-            Collections.singletonList(indexName + "*"),
-            new Template(
-                null,
-                null,
-                Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build())
-            ),
-            Collections.emptyList(),
-            4L,
-            5L,
-            Collections.emptyMap()
-        );
+        ComposableIndexTemplate cit = ComposableIndexTemplate.builder()
+            .indexPatterns(Collections.singletonList(indexName + "*"))
+            .template(
+                new Template(
+                    null,
+                    null,
+                    Map.of(indexName + "-composable-alias", AliasMetadata.builder(indexName + "-composable-alias").build())
+                )
+            )
+            .componentTemplates(Collections.emptyList())
+            .priority(4L)
+            .version(5L)
+            .metadata(Collections.emptyMap())
+            .build();
        assertAcked(
            client().execute(
                PutComposableIndexTemplateAction.INSTANCE,
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
index b0ec5de81984a..8f6026da835b6 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
@@ -81,10 +81,7 @@ public void testCreateShrinkIndexToN() {
        internalCluster().ensureAtLeastNumDataNodes(2);
        prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get();
        for (int i = 0; i < 20; i++) {
-            client().prepareIndex("source")
-                .setId(Integer.toString(i))
-                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON)
-                .get();
+            prepareIndex("source").setId(Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes();
        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
@@ -109,8 +106,7 @@ public void testCreateShrinkIndexToN() {
        assertHitCount(prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20);
        for (int i = 0; i < 20; i++) { // now update
-            client().prepareIndex("first_shrink")
-                .setId(Integer.toString(i))
+            prepareIndex("first_shrink").setId(Integer.toString(i))
                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON)
                .get();
        }
@@ -142,8 +138,7 @@ public void testCreateShrinkIndexToN() {
        assertHitCount(prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20);
        for (int i = 0; i < 20; i++) { // now update
-            client().prepareIndex("second_shrink")
-                .setId(Integer.toString(i))
+            prepareIndex("second_shrink").setId(Integer.toString(i))
                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON)
                .get();
        }
@@ -238,7 +233,7 @@ public void testCreateShrinkIndex() {
        ).get();
        final int docs = randomIntBetween(0, 128);
        for (int i = 0; i < docs; i++) {
-            client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes();
        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
@@ -318,7 +313,7 @@ public void testCreateShrinkIndex() {
        }
        for (int i = docs; i < 2 * docs; i++) {
-            client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        flushAndRefresh();
        assertHitCount(prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")), 2 * docs);
@@ -344,7 +339,7 @@ public void testCreateShrinkIndexFails() throws Exception {
            Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0)
        ).get();
        for (int i = 0; i < 20; i++) {
-            client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes();
        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
@@ -424,10 +419,7 @@ public void testCreateShrinkWithIndexSort() throws Exception {
                .put("number_of_replicas", 0)
        ).setMapping("id", "type=keyword,doc_values=true").get();
        for (int i = 0; i < 20; i++) {
-            client().prepareIndex("source")
-                .setId(Integer.toString(i))
-                .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON)
-                .get();
+            prepareIndex("source").setId(Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get();
        }
        Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes();
        assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
@@ -465,14 +457,14 @@ public void testCreateShrinkWithIndexSort() throws Exception {
        assertNoResizeSourceIndexSettings("target");
        flushAndRefresh();
-        GetSettingsResponse settingsResponse = indicesAdmin().prepareGetSettings("target").execute().actionGet();
+        GetSettingsResponse settingsResponse = indicesAdmin().prepareGetSettings("target").get();
        assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id");
        assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc");
        assertSortedSegments("target", expectedIndexSort);
        // ... and that the index sort is also applied to updates
        for (int i = 20; i < 40; i++) {
-            client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        flushAndRefresh();
        assertSortedSegments("target", expectedIndexSort);
@@ -483,7 +475,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception {
            Settings.builder().put(indexSettings()).put("index.number_of_replicas", 0).put("number_of_shards", 5)
        ).get();
        for (int i = 0; i < 30; i++) {
-            client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        indicesAdmin().prepareFlush("source").get();
        Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes();
@@ -610,8 +602,7 @@ static void assertNoResizeSourceIndexSettings(final String index) {
            .clear()
            .setMetadata(true)
            .setRoutingTable(true)
-            .execute()
-            .actionGet();
+            .get();
        IndexRoutingTable indexRoutingTable = clusterStateResponse.getState().routingTable().index(index);
        assertThat("Index " + index + " should have all primaries started", indexRoutingTable.allPrimaryShardsActive(), equalTo(true));
        IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(index);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
index 54add487a3dd4..56bbe135de66b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
@@ -24,7 +24,7 @@
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -60,7 +60,8 @@
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -127,8 +128,7 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha
        BiFunction<String, Integer, IndexRequestBuilder> indexFunc = (index, id) -> {
            try {
-                return client().prepareIndex(index)
-                    .setId(Integer.toString(id))
+                return prepareIndex(index).setId(Integer.toString(id))
                    .setSource(
                        jsonBuilder().startObject()
                            .field("foo", "bar")
@@ -243,27 +243,28 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha
            assertNested("first_split", numDocs);
            assertNested("second_split", numDocs);
        }
-        assertAllUniqueDocs(prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
-        assertAllUniqueDocs(prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
-        assertAllUniqueDocs(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
+        assertAllUniqueDocs(prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs);
+        assertAllUniqueDocs(prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs);
+        assertAllUniqueDocs(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs);
    }
    public void assertNested(String index, int numDocs) {
        // now, do a nested query
-        SearchResponse searchResponse = prepareSearch(index).setQuery(
-            nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs));
+        assertNoFailuresAndResponse(
+            prepareSearch(index).setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)),
+            searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs))
+        );
    }
-    public void assertAllUniqueDocs(SearchResponse response, int numDocs) {
-        Set<String> ids = new HashSet<>();
-        for (int i = 0; i < response.getHits().getHits().length; i++) {
-            String id = response.getHits().getHits()[i].getId();
-            assertTrue("found ID " + id + " more than once", ids.add(id));
-        }
-        assertEquals(numDocs, ids.size());
+    public void assertAllUniqueDocs(SearchRequestBuilder request, int numDocs) {
+        assertResponse(request, response -> {
+            Set<String> ids = new HashSet<>();
+            for (int i = 0; i < response.getHits().getHits().length; i++) {
+                String id = response.getHits().getHits()[i].getId();
+                assertTrue("found ID " + id + " more than once", ids.add(id));
+            }
+            assertEquals(numDocs, ids.size());
+        });
    }
    public void testSplitIndexPrimaryTerm() throws Exception {
@@ -342,7 +343,7 @@ public void testCreateSplitIndex() throws Exception {
        ).get();
        final int docs = randomIntBetween(0, 128);
        for (int i = 0; i < docs; i++) {
-            client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        internalCluster().ensureAtLeastNumDataNodes(2);
        // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
@@ -411,7 +412,7 @@ public void testCreateSplitIndex() throws Exception {
        }
        for (int i = docs; i < 2 * docs; i++) {
-            client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        flushAndRefresh();
        assertHitCount(prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")), 2 * docs);
@@ -444,10 +445,7 @@ public void testCreateSplitWithIndexSort() throws Exception {
                .put("number_of_replicas", 0)
        ).setMapping("id", "type=keyword,doc_values=true").get();
        for (int i = 0; i < 20; i++) {
-            client().prepareIndex("source")
-                .setId(Integer.toString(i))
-                .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON)
-                .get();
+            prepareIndex("source").setId(Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get();
        }
        // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
        // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
@@ -478,14 +476,14 @@ public void testCreateSplitWithIndexSort() throws Exception {
        );
        ensureGreen();
        flushAndRefresh();
-        GetSettingsResponse settingsResponse = indicesAdmin().prepareGetSettings("target").execute().actionGet();
+        GetSettingsResponse settingsResponse = indicesAdmin().prepareGetSettings("target").get();
        assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id");
        assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc");
        assertSortedSegments("target", expectedIndexSort);
        // ... and that the index sort is also applied to updates
        for (int i = 20; i < 40; i++) {
-            client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+            prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
        }
        flushAndRefresh();
        assertSortedSegments("target", expectedIndexSort);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
index dc5cc49092f7a..5df1ceea6bfce 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
@@ -44,15 +44,12 @@ public void testDeleteIndexWithBlocks() {
    public void testDeleteIndexOnIndexReadOnlyAllowDeleteSetting() {
        createIndex("test");
        ensureGreen("test");
-        client().prepareIndex().setIndex("test").setId("1").setSource("foo", "bar").get();
+        prepareIndex("test").setId("1").setSource("foo", "bar").get();
        refresh();
        try {
            updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE, true), "test");
            assertSearchHits(prepareSearch(), "1");
-            assertBlocked(
-                client().prepareIndex().setIndex("test").setId("2").setSource("foo", "bar"),
-                IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK
-            );
+            assertBlocked(prepareIndex("test").setId("2").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
            assertSearchHits(prepareSearch(), "1");
            assertAcked(indicesAdmin().prepareDelete("test"));
        } finally {
@@ -70,7 +67,7 @@ public void testClusterBlockMessageHasIndexName() {
            updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE, true), "test");
            ClusterBlockException e = expectThrows(
                ClusterBlockException.class,
-                () -> client().prepareIndex().setIndex("test").setId("1").setSource("foo", "bar").get()
+                () -> prepareIndex("test").setId("1").setSource("foo", "bar").get()
            );
            assertEquals(
                "index [test] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, "
@@ -85,15 +82,12 @@ public void testClusterBlockMessageHasIndexName() {
    public void testDeleteIndexOnClusterReadOnlyAllowDeleteSetting() {
        createIndex("test");
        ensureGreen("test");
-        client().prepareIndex().setIndex("test").setId("1").setSource("foo", "bar").get();
+        prepareIndex("test").setId("1").setSource("foo", "bar").get();
        refresh();
        try {
            updateClusterSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true));
            assertSearchHits(prepareSearch(), "1");
-            assertBlocked(
-                client().prepareIndex().setIndex("test").setId("2").setSource("foo", "bar"),
-                Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK
-            );
+            assertBlocked(prepareIndex("test").setId("2").setSource("foo", "bar"), Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
            assertBlocked(
                indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", 2)),
                Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK
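Note: the other change that dominates these hunks swaps client().prepareIndex().setIndex(name) (or client().prepareIndex(name)) for a bare prepareIndex(name). Presumably this is a one-line helper on the shared test base class (ESIntegTestCase) that owns the client; the following self-contained analogue (hypothetical names, not the real class) shows the shape of the delegation:

    class IntegTestBaseSketch {
        static class IndexRequestBuilderSketch {
            String index;
            String source;
            IndexRequestBuilderSketch setIndex(String index) { this.index = index; return this; }
            IndexRequestBuilderSketch setSource(String source) { this.source = source; return this; }
            String get() { return "indexed into " + index + ": " + source; }
        }

        // stand-in for client().prepareIndex(); the real one comes from the cluster's Client
        IndexRequestBuilderSketch clientPrepareIndex() { return new IndexRequestBuilderSketch(); }

        // the assumed helper: fix the index name up front, return the builder
        IndexRequestBuilderSketch prepareIndex(String index) {
            return clientPrepareIndex().setIndex(index);
        }

        public static void main(String[] args) {
            IntegTestBaseSketch test = new IntegTestBaseSketch();
            // before: client().prepareIndex().setIndex("test").setSource(...).get()
            // after:  prepareIndex("test").setSource(...).get()
            System.out.println(test.prepareIndex("test").setSource("{\"foo\":\"bar\"}").get());
        }
    }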
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java
index 94c08bd7e8162..d6c337dec53b8 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java
@@ -115,7 +115,7 @@ public void testSimple() throws Exception {
                .field("english_text", English.intToEnglish(value))
                .field("value", value)
                .endObject();
-            client().prepareIndex(index).setId("id-" + i).setSource(doc).get();
+            prepareIndex(index).setId("id-" + i).setSource(doc).get();
        }
        final boolean forceNorms = randomBoolean();
        if (forceNorms) {
@@ -123,11 +123,11 @@
                .startObject()
                .field("english_text", "A long sentence to make sure that norms is non-zero")
                .endObject();
-            client().prepareIndex(index).setId("id").setSource(doc).get();
+            prepareIndex(index).setId("id").setSource(doc).get();
        }
        // Force merge to ensure that there are more than one numeric value to justify doc value.
        client().admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get();
-        PlainActionFuture<AnalyzeIndexDiskUsageResponse> future = PlainActionFuture.newFuture();
+        PlainActionFuture<AnalyzeIndexDiskUsageResponse> future = new PlainActionFuture<>();
        client().execute(
            AnalyzeIndexDiskUsageAction.INSTANCE,
            new AnalyzeIndexDiskUsageRequest(new String[] { index }, AnalyzeIndexDiskUsageRequest.DEFAULT_INDICES_OPTIONS, true),
@@ -167,7 +167,7 @@ public void testFailOnFlush() throws Exception {
                .field("english_text", English.intToEnglish(value))
                .field("value", value)
                .endObject();
-            client().prepareIndex(indexName).setId("id-" + i).setSource(doc).get();
+            prepareIndex(indexName).setId("id-" + i).setSource(doc).get();
        }
        Index index = clusterService().state().metadata().index(indexName).getIndex();
        List failedShards = randomSubsetOf(
@@ -203,7 +203,7 @@ public void testManyShards() throws Exception {
                    .field("english_text", English.intToEnglish(value))
                    .field("value", value)
                    .endObject();
-                client().prepareIndex(indexName).setId("id-" + i).setSource(doc).get();
+                prepareIndex(indexName).setId("id-" + i).setSource(doc).get();
            }
        }
@@ -237,7 +237,7 @@ public void testFailingTargetShards() throws Exception {
                .field("english_text", English.intToEnglish(value))
                .field("value", value)
                .endObject();
-            client().prepareIndex(indexName).setId("id-" + i).setSource(doc).get();
+            prepareIndex(indexName).setId("id-" + i).setSource(doc).get();
        }
        final Index index = resolveIndex(indexName);
        final List failingShards = randomSubsetOf(
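Note: the hunk above also drops the PlainActionFuture.newFuture() static factory in favor of the diamond constructor. My assumption is that the factory existed only so pre-diamond call sites could omit type arguments, which new PlainActionFuture<>() now infers directly. A sketch of the same trade-off using a standard-library future (illustrative only):

    import java.util.concurrent.CompletableFuture;

    class DiamondSketch {
        // old style: a static factory carries the type inference
        static <T> CompletableFuture<T> newFuture() {
            return new CompletableFuture<>();
        }

        public static void main(String[] args) {
            CompletableFuture<String> viaFactory = newFuture();               // legacy shape
            CompletableFuture<String> viaDiamond = new CompletableFuture<>(); // preferred shape
            viaFactory.complete("ok");
            viaDiamond.complete("ok");
            System.out.println(viaFactory.join() + " " + viaDiamond.join());
        }
    }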
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
index 2dc2ef0e90009..69d4f7aaef329 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
@@ -31,7 +31,7 @@ public void testFlushWithBlocks() {
        int docs = between(10, 100);
        for (int i = 0; i < docs; i++) {
-            client().prepareIndex("test").setId("" + i).setSource("test", "init").execute().actionGet();
+            prepareIndex("test").setId("" + i).setSource("test", "init").get();
        }
        // Request is not blocked
@@ -44,7 +44,7 @@ public void testFlushWithBlocks() {
        )) {
            try {
                enableIndexBlock("test", blockSetting);
-                FlushResponse response = indicesAdmin().prepareFlush("test").execute().actionGet();
+                FlushResponse response = indicesAdmin().prepareFlush("test").get();
                assertNoFailures(response);
                assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
            } finally {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java
index 80d1b95442f44..a3474afc96c51 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java
@@ -43,14 +43,14 @@ public void testForceMergeWithBlocks() {
        int docs = between(10, 100);
        for (int i = 0; i < docs; i++) {
-            client().prepareIndex("test").setId("" + i).setSource("test", "init").execute().actionGet();
+            prepareIndex("test").setId("" + i).setSource("test", "init").get();
        }
        // Request is not blocked
        for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY_ALLOW_DELETE)) {
            try {
                enableIndexBlock("test", blockSetting);
-                ForceMergeResponse response = indicesAdmin().prepareForceMerge("test").execute().actionGet();
+                ForceMergeResponse response = indicesAdmin().prepareForceMerge("test").get();
                assertNoFailures(response);
                assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
            } finally {
@@ -70,7 +70,7 @@ public void testForceMergeWithBlocks() {
        // Merging all indices is blocked when the cluster is read-only
        try {
-            ForceMergeResponse response = indicesAdmin().prepareForceMerge().execute().actionGet();
+            ForceMergeResponse response = indicesAdmin().prepareForceMerge().get();
            assertNoFailures(response);
            assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java
index da750103d2943..41abfc1219199 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java
@@ -39,7 +39,7 @@ public void testRefreshWithBlocks() {
        )) {
            try {
                enableIndexBlock("test", blockSetting);
-                RefreshResponse response = indicesAdmin().prepareRefresh("test").execute().actionGet();
+                RefreshResponse response = indicesAdmin().prepareRefresh("test").get();
                assertNoFailures(response);
                assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
            } finally {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
index d7e4e42b73554..7ae7fc5c4a180 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
@@ -794,7 +794,9 @@ public void testRolloverConcurrently() throws Exception {
            null,
            null
        );
-        putTemplateRequest.indexTemplate(new ComposableIndexTemplate(List.of("test-*"), template, null, 100L, null, null));
+        putTemplateRequest.indexTemplate(
+            ComposableIndexTemplate.builder().indexPatterns(List.of("test-*")).template(template).priority(100L).build()
+        );
        assertAcked(client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet());
        final CyclicBarrier barrier = new CyclicBarrier(numOfThreads);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java
index 3a7df923a3e0c..0705e1216af43 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java
@@ -29,7 +29,7 @@ public void testIndicesSegmentsWithBlocks() {
        int docs = between(10, 100);
        for (int i = 0; i < docs; i++) {
-            client().prepareIndex("test-blocks").setId("" + i).setSource("test", "init").execute().actionGet();
+            prepareIndex("test-blocks").setId("" + i).setSource("test", "init").get();
        }
        indicesAdmin().prepareFlush("test-blocks").get();
@@ -42,7 +42,7 @@ public void testIndicesSegmentsWithBlocks() {
        )) {
            try {
                enableIndexBlock("test-blocks", blockSetting);
-                IndicesSegmentResponse response = indicesAdmin().prepareSegments("test-blocks").execute().actionGet();
+                IndicesSegmentResponse response = indicesAdmin().prepareSegments("test-blocks").get();
                assertNoFailures(response);
            } finally {
                disableIndexBlock("test-blocks", blockSetting);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
index ef86bfa0bd485..310f9394f60c1 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
@@ -31,7 +31,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.ExecutionException;
 import java.util.function.Predicate;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -197,14 +196,14 @@ public void testCorruptedShards() throws Exception {
        enableAllocation(index);
    }
-    private void indexRandomData(String index) throws ExecutionException, InterruptedException {
+    private void indexRandomData(String index) throws InterruptedException {
        int numDocs = scaledRandomIntBetween(10, 20);
        IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex(index).setSource("field", "value");
+            builders[i] = prepareIndex(index).setSource("field", "value");
        }
        indexRandom(true, builders);
-        indicesAdmin().prepareFlush().setForce(true).execute().actionGet();
+        indicesAdmin().prepareFlush().setForce(true).get();
    }
    private static final class IndexNodePredicate implements Predicate {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java
index 93214372ef201..95b98722a8423 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java
@@ -36,7 +36,7 @@ public void testIndicesStatsWithBlocks() {
        )) {
            try {
                enableIndexBlock("ro", blockSetting);
-                IndicesStatsResponse indicesStatsResponse = indicesAdmin().prepareStats("ro").execute().actionGet();
+                IndicesStatsResponse indicesStatsResponse = indicesAdmin().prepareStats("ro").get();
                assertNotNull(indicesStatsResponse.getIndex("ro"));
            } finally {
                disableIndexBlock("ro", blockSetting);
@@ -46,7 +46,7 @@ public void testIndicesStatsWithBlocks() {
        // Request is blocked
        try {
            enableIndexBlock("ro", IndexMetadata.SETTING_BLOCKS_METADATA);
-            indicesAdmin().prepareStats("ro").execute().actionGet();
+            indicesAdmin().prepareStats("ro").get();
            fail("Exists should fail when " + IndexMetadata.SETTING_BLOCKS_METADATA + " is true");
        } catch (ClusterBlockException e) {
            // Ok, a ClusterBlockException is expected
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
index 224db253675d2..8bc9bac2543d3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
@@ -159,8 +159,7 @@ public void testDeleteIndexWhileIndexing() throws Exception {
                while (stopped.get() == false && docID.get() < 5000) {
                    String id = Integer.toString(docID.incrementAndGet());
                    try {
-                        DocWriteResponse response = client().prepareIndex(index)
-                            .setId(id)
+                        DocWriteResponse response = prepareIndex(index).setId(id)
                            .setSource(Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON)
                            .get();
                        assertThat(response.getResult(), is(oneOf(CREATED, UPDATED)));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java
index 18a8ae2dd2800..6a2ab41fae5d6 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.query.QueryBuilders;
@@ -26,6 +25,7 @@
 import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThan;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -136,23 +136,23 @@ public void afterBulk(long executionId, BulkRequest request, Exception failure)
        indicesAdmin().refresh(new RefreshRequest()).get();
-        SearchResponse results = prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0).get();
-        assertThat(bulkProcessor.getTotalBytesInFlight(), equalTo(0L));
-        if (rejectedExecutionExpected) {
-            assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps));
-        } else if (rejectedAfterAllRetries) {
-            assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps));
-        } else {
-            assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps));
-        }
+        final boolean finalRejectedAfterAllRetries = rejectedAfterAllRetries;
+        assertResponse(prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0), results -> {
+            assertThat(bulkProcessor.getTotalBytesInFlight(), equalTo(0L));
+            if (rejectedExecutionExpected) {
+                assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps));
+            } else if (finalRejectedAfterAllRetries) {
+                assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps));
+            } else {
+                assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps));
+            }
+        });
    }
    private static void indexDocs(BulkProcessor2 processor, int numDocs) {
        for (int i = 1; i <= numDocs; i++) {
            processor.add(
-                client().prepareIndex()
-                    .setIndex(INDEX_NAME)
-                    .setId(Integer.toString(i))
+                prepareIndex(INDEX_NAME).setId(Integer.toString(i))
                    .setSource("field", randomRealisticUnicodeOfLengthBetween(1, 30))
                    .request()
            );
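Note: the hunk above illustrates two idioms that repeat through the search-test diffs below. First, instead of holding a SearchResponse in a local variable, assertions move into a consumer passed to assertResponse, which lets the framework release the response once the assertions have run. Second, a lambda can only capture effectively-final locals, so a reassigned flag like rejectedAfterAllRetries is first copied into a final variable. A self-contained sketch of both (hypothetical stand-ins, not the ES helpers):

    import java.util.function.Consumer;
    import java.util.function.Supplier;

    class AssertResponseSketch {
        static <R> void assertResponse(Supplier<R> request, Consumer<R> assertions) {
            R response = request.get();
            try {
                assertions.accept(response);
            } finally {
                // the real helper presumably decrements the response's ref count here
                System.out.println("released " + response);
            }
        }

        public static void main(String[] args) {
            boolean rejected = false;
            if (args.length > 0) {
                rejected = true; // reassigned, so no longer effectively final
            }
            final boolean finalRejected = rejected; // copy that the lambda may capture
            assertResponse(() -> "search-response", response -> {
                System.out.println(finalRejected ? "expect fewer hits" : "expect all hits: " + response);
            });
        }
    }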
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java
index 7beaebcfb87b8..85b720a03478e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java
@@ -28,12 +28,12 @@ public void testBulkProcessorAutoCreateRestrictions() {
        internalCluster().startNode(settings);
        createIndex("willwork");
-        clusterAdmin().prepareHealth("willwork").setWaitForGreenStatus().execute().actionGet();
+        clusterAdmin().prepareHealth("willwork").setWaitForGreenStatus().get();
        BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
-        bulkRequestBuilder.add(client().prepareIndex("willwork").setId("1").setSource("{\"foo\":1}", XContentType.JSON));
-        bulkRequestBuilder.add(client().prepareIndex("wontwork").setId("2").setSource("{\"foo\":2}", XContentType.JSON));
-        bulkRequestBuilder.add(client().prepareIndex("willwork").setId("3").setSource("{\"foo\":3}", XContentType.JSON));
+        bulkRequestBuilder.add(prepareIndex("willwork").setId("1").setSource("{\"foo\":1}", XContentType.JSON));
+        bulkRequestBuilder.add(prepareIndex("wontwork").setId("2").setSource("{\"foo\":2}", XContentType.JSON));
+        bulkRequestBuilder.add(prepareIndex("willwork").setId("3").setSource("{\"foo\":3}", XContentType.JSON));
        BulkResponse br = bulkRequestBuilder.get();
        BulkItemResponse[] responses = br.getItems();
        assertEquals(3, responses.length);
@@ -52,7 +52,7 @@ public void testBulkProcessorAutoCreateRestrictions() {
    public void testIndexWithDisabledAutoCreateIndex() {
        updateClusterSettings(Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("-*", "+.*")));
        final BulkItemResponse itemResponse = client().prepareBulk()
-            .add(client().prepareIndex("test-index").setSource("foo", "bar"))
+            .add(prepareIndex("test-index").setSource("foo", "bar"))
            .get()
            .getItems()[0];
        assertThat(itemResponse.getFailure().getCause(), instanceOf(IndexNotFoundException.class));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
index e664f6e6bb42f..8bd3a6cf02868 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java
@@ -9,7 +9,6 @@
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.query.QueryBuilders;
@@ -25,6 +24,7 @@
 import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThan;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -131,15 +131,16 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure)
        indicesAdmin().refresh(new RefreshRequest()).get();
-        SearchResponse results = prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0).get();
-
-        if (rejectedExecutionExpected) {
-            assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps));
-        } else if (rejectedAfterAllRetries) {
-            assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps));
-        } else {
-            assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps));
-        }
+        final boolean finalRejectedAfterAllRetries = rejectedAfterAllRetries;
+        assertResponse(prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0), results -> {
+            if (rejectedExecutionExpected) {
+                assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps));
+            } else if (finalRejectedAfterAllRetries) {
+                assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps));
+            } else {
+                assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps));
+            }
+        });
    }
    private void assertRetriedCorrectly(CorrelatingBackoffPolicy internalPolicy, Object bulkResponse, Throwable failure) {
@@ -156,9 +157,7 @@ private void assertRetriedCorrectly(CorrelatingBackoffPolicy internalPolicy, Obj
    private static void indexDocs(BulkProcessor processor, int numDocs) {
        for (int i = 1; i <= numDocs; i++) {
            processor.add(
-                client().prepareIndex()
-                    .setIndex(INDEX_NAME)
-                    .setId(Integer.toString(i))
+                prepareIndex(INDEX_NAME).setId(Integer.toString(i))
                    .setSource("field", randomRealisticUnicodeOfLengthBetween(1, 30))
                    .request()
            );
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java
index 9433f93d91f58..00bd6ee7ee891 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java
@@ -102,13 +102,12 @@ public void testBulkUpdateSimple() throws Exception {
        ensureGreen();
        BulkResponse bulkResponse = client().prepareBulk()
-            .add(client().prepareIndex().setIndex(indexOrAlias()).setId("1").setSource("field", 1))
-            .add(client().prepareIndex().setIndex(indexOrAlias()).setId("2").setSource("field", 2).setCreate(true))
-            .add(client().prepareIndex().setIndex(indexOrAlias()).setId("3").setSource("field", 3))
-            .add(client().prepareIndex().setIndex(indexOrAlias()).setId("4").setSource("field", 4))
-            .add(client().prepareIndex().setIndex(indexOrAlias()).setId("5").setSource("field", 5))
-            .execute()
-            .actionGet();
+            .add(prepareIndex(indexOrAlias()).setId("1").setSource("field", 1))
+            .add(prepareIndex(indexOrAlias()).setId("2").setSource("field", 2).setCreate(true))
+            .add(prepareIndex(indexOrAlias()).setId("3").setSource("field", 3))
+            .add(prepareIndex(indexOrAlias()).setId("4").setSource("field", 4))
+            .add(prepareIndex(indexOrAlias()).setId("5").setSource("field", 5))
+            .get();
        assertThat(bulkResponse.hasFailures(), equalTo(false));
        assertThat(bulkResponse.getItems().length, equalTo(5));
@@ -141,17 +140,17 @@ public void testBulkUpdateSimple() throws Exception {
        assertThat(bulkResponse.getItems()[2].getResponse().getId(), equalTo("3"));
        assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(2L));
-        GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").execute().actionGet();
+        GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").get();
        assertThat(getResponse.isExists(), equalTo(true));
        assertThat(getResponse.getVersion(), equalTo(2L));
        assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(2L));
-        getResponse = client().prepareGet().setIndex("test").setId("2").execute().actionGet();
+        getResponse = client().prepareGet().setIndex("test").setId("2").get();
        assertThat(getResponse.isExists(), equalTo(true));
        assertThat(getResponse.getVersion(), equalTo(2L));
        assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(3L));
-        getResponse = client().prepareGet().setIndex("test").setId("3").execute().actionGet();
+        getResponse = client().prepareGet().setIndex("test").setId("3").get();
        assertThat(getResponse.isExists(), equalTo(true));
        assertThat(getResponse.getVersion(), equalTo(2L));
        assertThat(getResponse.getSource().get("field1").toString(), equalTo("test"));
@@ -180,15 +179,15 @@ public void testBulkUpdateSimple() throws Exception {
        assertThat(bulkResponse.getItems()[2].getResponse().getIndex(), equalTo("test"));
        assertThat(bulkResponse.getItems()[2].getResponse().getVersion(), equalTo(3L));
-        getResponse = client().prepareGet().setIndex("test").setId("6").execute().actionGet();
+        getResponse = client().prepareGet().setIndex("test").setId("6").get();
        assertThat(getResponse.isExists(), equalTo(true));
        assertThat(getResponse.getVersion(), equalTo(1L));
        assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(0L));
-        getResponse = client().prepareGet().setIndex("test").setId("7").execute().actionGet();
+        getResponse = client().prepareGet().setIndex("test").setId("7").get();
        assertThat(getResponse.isExists(), equalTo(false));
-        getResponse = client().prepareGet().setIndex("test").setId("2").execute().actionGet();
+        getResponse = client().prepareGet().setIndex("test").setId("2").get();
        assertThat(getResponse.isExists(), equalTo(true));
        assertThat(getResponse.getVersion(), equalTo(3L));
        assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(4L));
@@ -221,12 +220,12 @@ public void testBulkUpdateWithScriptedUpsertAndDynamicMappingUpdate() throws Exc
        assertThat(bulkResponse.getItems()[1].getResponse().getId(), equalTo("2"));
        assertThat(bulkResponse.getItems()[1].getResponse().getVersion(), equalTo(1L));
-        GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").execute().actionGet();
+        GetResponse getResponse = client().prepareGet().setIndex("test").setId("1").get();
        assertThat(getResponse.isExists(), equalTo(true));
        assertThat(getResponse.getVersion(), equalTo(1L));
        assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(2L));
-        getResponse = client().prepareGet().setIndex("test").setId("2").execute().actionGet();
+        getResponse = client().prepareGet().setIndex("test").setId("2").get();
        assertThat(getResponse.isExists(), equalTo(true));
        assertThat(getResponse.getVersion(), equalTo(1L));
        assertThat(((Number) getResponse.getSource().get("field")).longValue(), equalTo(2L));
@@ -236,9 +235,9 @@ public void testBulkWithCAS() throws Exception {
        createIndex("test", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build());
        ensureGreen();
        BulkResponse bulkResponse = client().prepareBulk()
-            .add(client().prepareIndex("test").setId("1").setCreate(true).setSource("field", "1"))
-            .add(client().prepareIndex("test").setId("2").setCreate(true).setSource("field", "1"))
-            .add(client().prepareIndex("test").setId("1").setSource("field", "2"))
+            .add(prepareIndex("test").setId("1").setCreate(true).setSource("field", "1"))
+            .add(prepareIndex("test").setId("2").setCreate(true).setSource("field", "1"))
+            .add(prepareIndex("test").setId("1").setSource("field", "2"))
            .get();
        assertEquals(DocWriteResponse.Result.CREATED, bulkResponse.getItems()[0].getResponse().getResult());
@@ -259,9 +258,9 @@ public void testBulkWithCAS() throws Exception {
        assertThat(bulkResponse.getItems()[2].getResponse().getSeqNo(), equalTo(4L));
        bulkResponse = client().prepareBulk()
-            .add(client().prepareIndex("test").setId("e1").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
-            .add(client().prepareIndex("test").setId("e2").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
-            .add(client().prepareIndex("test").setId("e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL))
+            .add(prepareIndex("test").setId("e1").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
+            .add(prepareIndex("test").setId("e2").setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
+            .add(prepareIndex("test").setId("e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL))
            .get();
        assertEquals(DocWriteResponse.Result.CREATED, bulkResponse.getItems()[0].getResponse().getResult());
@@ -285,11 +284,10 @@ public void testBulkUpdateMalformedScripts() throws Exception {
        ensureGreen();
        BulkResponse bulkResponse = client().prepareBulk()
-            .add(client().prepareIndex().setIndex("test").setId("1").setSource("field", 1))
-            .add(client().prepareIndex().setIndex("test").setId("2").setSource("field", 1))
-            .add(client().prepareIndex().setIndex("test").setId("3").setSource("field", 1))
-            .execute()
-            .actionGet();
+            .add(prepareIndex("test").setId("1").setSource("field", 1))
+            .add(prepareIndex("test").setId("2").setSource("field", 1))
+            .add(prepareIndex("test").setId("3").setSource("field", 1))
+            .get();
        assertThat(bulkResponse.hasFailures(), equalTo(false));
        assertThat(bulkResponse.getItems().length, equalTo(3));
@@ -330,8 +328,7 @@ public void testBulkUpdateMalformedScripts() throws Exception {
                    )
                )
            )
-            .execute()
-            .actionGet();
+            .get();
        assertThat(bulkResponse.hasFailures(), equalTo(true));
        assertThat(bulkResponse.getItems().length, equalTo(3));
@@ -385,7 +382,7 @@ public void testBulkUpdateLargerVolume() throws Exception {
        assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().sourceAsMap().get("counter"), equalTo(1));
            for (int j = 0; j < 5; j++) {
-                GetResponse getResponse = client().prepareGet("test", Integer.toString(i)).execute().actionGet();
+                GetResponse getResponse = client().prepareGet("test", Integer.toString(i)).get();
                assertThat(getResponse.isExists(), equalTo(true));
                assertThat(getResponse.getVersion(), equalTo(1L));
                assertThat(((Number) getResponse.getSource().get("counter")).longValue(), equalTo(1L));
@@ -410,7 +407,7 @@ public void testBulkUpdateLargerVolume() throws Exception {
            builder.add(updateBuilder);
        }
-        response = builder.execute().actionGet();
+        response = builder.get();
        assertThat(response.hasFailures(), equalTo(false));
        assertThat(response.getItems().length, equalTo(numDocs));
        for (int i = 0; i < numDocs; i++) {
@@ -428,7 +425,7 @@ public void testBulkUpdateLargerVolume() throws Exception {
        for (int i = (numDocs / 2); i < maxDocs; i++) {
            builder.add(client().prepareUpdate().setIndex("test").setId(Integer.toString(i)).setScript(script));
        }
-        response = builder.execute().actionGet();
+        response = builder.get();
        assertThat(response.hasFailures(), equalTo(true));
        assertThat(response.getItems().length, equalTo(numDocs));
        for (int i = 0; i < numDocs; i++) {
@@ -453,7 +450,7 @@ public void testBulkUpdateLargerVolume() throws Exception {
                    .setScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op = \"none\"", Collections.emptyMap()))
            );
        }
-        response = builder.execute().actionGet();
+        response = builder.get();
        assertThat(response.buildFailureMessage(), response.hasFailures(), equalTo(false));
        assertThat(response.getItems().length, equalTo(numDocs));
        for (int i = 0; i < numDocs; i++) {
@@ -472,7 +469,7 @@ public void testBulkUpdateLargerVolume() throws Exception {
                    .setScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx.op = \"delete\"", Collections.emptyMap()))
            );
        }
-        response = builder.execute().actionGet();
+        response = builder.get();
        assertThat("expected no failures but got: " + response.buildFailureMessage(), response.hasFailures(), equalTo(false));
        assertThat(response.getItems().length, equalTo(numDocs));
        for (int i = 0; i < numDocs; i++) {
@@ -503,7 +500,7 @@ public void testBulkIndexingWhileInitializing() throws Exception {
        for (int i = 0; i < numDocs;) {
            final BulkRequestBuilder builder = client().prepareBulk();
            for (int j = 0; j < bulk && i < numDocs; j++, i++) {
-                builder.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("val", i));
+                builder.add(prepareIndex("test").setId(Integer.toString(i)).setSource("val", i));
            }
            logger.info("bulk indexing {}-{}", i - bulk, i - 1);
            BulkResponse response = builder.get();
@@ -578,7 +575,7 @@ public void testThatInvalidIndexNamesShouldNotBreakCompleteBulkRequest() {
            } else {
                name = "test";
            }
-            builder.add(client().prepareIndex().setIndex(name).setId("1").setSource("field", 1));
+            builder.add(prepareIndex(name).setId("1").setSource("field", 1));
        }
        BulkResponse bulkResponse = builder.get();
        assertThat(bulkResponse.hasFailures(), is(expectFailure));
@@ -655,7 +652,7 @@ public void testThatMissingIndexDoesNotAbortFullBulkRequest() throws Exception {
    public void testFailedRequestsOnClosedIndex() throws Exception {
        createIndex("bulkindex1");
-        client().prepareIndex("bulkindex1").setId("1").setSource("text", "test").get();
+        prepareIndex("bulkindex1").setId("1").setSource("text", "test").get();
        assertBusy(() -> assertAcked(indicesAdmin().prepareClose("bulkindex1")));
        BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(RefreshPolicy.IMMEDIATE);
@@ -678,7 +675,7 @@ public void testFailedRequestsOnClosedIndex() throws Exception {
    // issue 9821
    public void testInvalidIndexNamesCorrectOpType() {
        BulkResponse bulkResponse = client().prepareBulk()
-            .add(client().prepareIndex().setIndex("INVALID.NAME").setId("1").setSource(Requests.INDEX_CONTENT_TYPE, "field", 1))
+            .add(prepareIndex("INVALID.NAME").setId("1").setSource(Requests.INDEX_CONTENT_TYPE, "field", 1))
            .add(client().prepareUpdate().setIndex("INVALID.NAME").setId("1").setDoc(Requests.INDEX_CONTENT_TYPE, "field", randomInt()))
            .add(client().prepareDelete().setIndex("INVALID.NAME").setId("1"))
            .get();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java
index 6ec01c3be5626..29a5e491dd3fd 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/WriteAckDelayIT.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.action.bulk;
 
 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -18,6 +17,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
+
 public class WriteAckDelayIT extends ESIntegTestCase {
 
     @Override
@@ -34,7 +35,7 @@ public void testIndexWithWriteDelayEnabled() throws Exception {
        logger.info("indexing [{}] docs", numOfDocs);
        List<IndexRequestBuilder> builders = new ArrayList<>(numOfDocs);
        for (int j = 0; j < numOfDocs; j++) {
-            builders.add(client().prepareIndex("test").setSource("field", "value_" + j));
+            builders.add(prepareIndex("test").setSource("field", "value_" + j));
        }
        indexRandom(true, builders);
        logger.info("verifying indexed content");
@@ -42,17 +43,18 @@ public void testIndexWithWriteDelayEnabled() throws Exception {
        for (int j = 0; j < numOfChecks; j++) {
            try {
                logger.debug("running search");
-                SearchResponse response = prepareSearch("test").get();
-                if (response.getHits().getTotalHits().value != numOfDocs) {
-                    final String message = "Count is "
-                        + response.getHits().getTotalHits().value
-                        + " but "
-                        + numOfDocs
-                        + " was expected. "
-                        + ElasticsearchAssertions.formatShardStatus(response);
-                    logger.error("{}. search response: \n{}", message, response);
-                    fail(message);
-                }
+                assertResponse(prepareSearch("test"), response -> {
+                    if (response.getHits().getTotalHits().value != numOfDocs) {
+                        final String message = "Count is "
+                            + response.getHits().getTotalHits().value
+                            + " but "
+                            + numOfDocs
+                            + " was expected. "
+                            + ElasticsearchAssertions.formatShardStatus(response);
+                        logger.error("{}. search response: \n{}", message, response);
+                        fail(message);
+                    }
+                });
            } catch (Exception e) {
                logger.error("search failed", e);
                throw e;
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java
index b9e8b40c70cb8..eff681f1f281b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java
@@ -29,7 +29,8 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -95,31 +96,32 @@ public void testBasic() {
                remoteClient.prepareIndex("remote_test").setId("remote_new").setSource().get();
                remoteClient.admin().indices().prepareRefresh().get();
            }
-            SearchResponse resp = localClient.prepareSearch()
-                .setPreference(null)
-                .setQuery(new MatchAllQueryBuilder())
-                .setPointInTime(new PointInTimeBuilder(pitId))
-                .setSize(1000)
-                .get();
-            assertNoFailures(resp);
-            assertHitCount(resp, (includeLocalIndex ? localNumDocs : 0) + remoteNumDocs);
-
-            SearchResponse.Clusters clusters = resp.getClusters();
-            int expectedNumClusters = 1 + (includeLocalIndex ? 1 : 0);
-            assertThat(clusters.getTotal(), equalTo(expectedNumClusters));
-            assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(expectedNumClusters));
-            assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0));
-
-            if (includeLocalIndex) {
-                SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-                assertNotNull(localCluster);
-                assertOneSuccessfulShard(localCluster);
-            }
-
-            SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER);
-            assertNotNull(remoteCluster);
-            assertOneSuccessfulShard(remoteCluster);
-
+            assertNoFailuresAndResponse(
+                localClient.prepareSearch()
+                    .setPreference(null)
+                    .setQuery(new MatchAllQueryBuilder())
+                    .setPointInTime(new PointInTimeBuilder(pitId))
+                    .setSize(1000),
+                resp -> {
+                    assertHitCount(resp, (includeLocalIndex ? localNumDocs : 0) + remoteNumDocs);
+
+                    SearchResponse.Clusters clusters = resp.getClusters();
+                    int expectedNumClusters = 1 + (includeLocalIndex ? 1 : 0);
+                    assertThat(clusters.getTotal(), equalTo(expectedNumClusters));
+                    assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(expectedNumClusters));
+                    assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0));
+
+                    if (includeLocalIndex) {
+                        SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+                        assertNotNull(localCluster);
+                        assertOneSuccessfulShard(localCluster);
+                    }
+
+                    SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER);
+                    assertNotNull(remoteCluster);
+                    assertOneSuccessfulShard(remoteCluster);
+                }
+            );
        } finally {
            closePointInTime(pitId);
        }
@@ -157,24 +159,22 @@ public void testFailuresOnOneShardsWithPointInTime() throws ExecutionException,
            ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0);
            SearchRequest searchRequest = new SearchRequest();
            searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10).pointInTimeBuilder(new PointInTimeBuilder(pitId)));
-            SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get();
-
-            SearchResponse.Clusters clusters = searchResponse.getClusters();
-            int expectedNumClusters = 1 + (includeLocalIndex ? 1 : 0);
-            assertThat(clusters.getTotal(), equalTo(expectedNumClusters));
-            assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0));
-            assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0));
-            assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(expectedNumClusters));
-
-            if (includeLocalIndex) {
-                SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-                assertNotNull(localCluster);
-                assertOneFailedShard(localCluster, numShards);
-            }
-            SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER);
-            assertNotNull(remoteCluster);
-            assertOneFailedShard(remoteCluster, numShards);
-
+            assertResponse(client(LOCAL_CLUSTER).search(searchRequest), searchResponse -> {
+                SearchResponse.Clusters clusters = searchResponse.getClusters();
+                int expectedNumClusters = 1 + (includeLocalIndex ? 1 : 0);
+                assertThat(clusters.getTotal(), equalTo(expectedNumClusters));
+                assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0));
+                assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0));
+                assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(expectedNumClusters));
+                if (includeLocalIndex) {
+                    SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+                    assertNotNull(localCluster);
+                    assertOneFailedShard(localCluster, numShards);
+                }
+                SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER);
+                assertNotNull(remoteCluster);
+                assertOneFailedShard(remoteCluster, numShards);
+            });
        } finally {
            closePointInTime(pitId);
        }
@@ -202,11 +202,11 @@ private static void assertOneFailedShard(SearchResponse.Cluster cluster, int tot
    private String openPointInTime(String[] indices, TimeValue keepAlive) {
        OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive);
-        final OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet();
+        final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet();
        return response.getPointInTimeId();
    }
    private void closePointInTime(String readerId) {
-        client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(readerId)).actionGet();
+        client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet();
    }
}
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -78,7 +79,7 @@ public void testBasic() { int numDocs = randomIntBetween(10, 50); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); - client().prepareIndex("test").setId(id).setSource("value", i).get(); + prepareIndex("test").setId(id).setSource("value", i).get(); } refresh("test"); String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); @@ -121,7 +122,7 @@ public void testMultipleIndices() { for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); String index = "index-" + randomIntBetween(1, numIndices); - client().prepareIndex(index).setId(id).setSource("value", i).get(); + prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); String pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); @@ -135,7 +136,7 @@ public void testMultipleIndices() { for (int i = 0; i < moreDocs; i++) { String id = "more-" + i; String index = "index-" + randomIntBetween(1, numIndices); - client().prepareIndex(index).setId(id).setSource("value", i).get(); + prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); resp = prepareSearch().get(); @@ -152,13 +153,62 @@ public void testMultipleIndices() { } } + public void testIndexFilter() { + int numDocs = randomIntBetween(1, 9); + for (int i = 1; i <= 3; i++) { + String index = "index-" + i; + createIndex(index); + for (int j = 1; j <= numDocs; j++) { + String id = Integer.toString(j); + client().prepareIndex(index).setId(id).setSource("@timestamp", "2023-0" + i + "-0" + j).get(); + } + } + refresh(); + + { + + OpenPointInTimeRequest request = new OpenPointInTimeRequest("*").keepAlive(TimeValue.timeValueMinutes(2)); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); + try { + SearchContextId searchContextId = SearchContextId.decode(writableRegistry(), response.getPointInTimeId()); + String[] actualIndices = searchContextId.getActualIndices(); + assertEquals(3, actualIndices.length); + } finally { + closePointInTime(response.getPointInTimeId()); + } + } + { + OpenPointInTimeRequest request = new OpenPointInTimeRequest("*").keepAlive(TimeValue.timeValueMinutes(2)); + request.indexFilter(new RangeQueryBuilder("@timestamp").gte("2023-03-01")); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); + String pitId = response.getPointInTimeId(); + try { + SearchContextId searchContextId = SearchContextId.decode(writableRegistry(), pitId); + String[] actualIndices = searchContextId.getActualIndices(); + assertEquals(1, actualIndices.length); + assertEquals("index-3", actualIndices[0]); + assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)).setSize(50), resp -> { + assertNoFailures(resp); + assertHitCount(resp, numDocs); + assertNotNull(resp.pointInTimeId()); + assertThat(resp.pointInTimeId(), equalTo(pitId)); + for (SearchHit hit : resp.getHits()) { + assertEquals("index-3", hit.getIndex()); + } + }); + } finally { + 
closePointInTime(pitId); + } + } + } + public void testRelocation() throws Exception { internalCluster().ensureAtLeastNumDataNodes(4); createIndex("test", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, between(0, 1)).build()); ensureGreen("test"); int numDocs = randomIntBetween(10, 50); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); @@ -179,7 +229,7 @@ public void testRelocation() throws Exception { if (randomBoolean()) { int moreDocs = randomIntBetween(10, 50); for (int i = 0; i < moreDocs; i++) { - client().prepareIndex("test").setId("more-" + i).setSource("value", i).get(); + prepareIndex("test").setId("more-" + i).setSource("value", i).get(); } refresh(); } @@ -210,7 +260,7 @@ public void testPointInTimeNotFound() throws Exception { int index1 = randomIntBetween(10, 50); for (int i = 0; i < index1; i++) { String id = Integer.toString(i); - client().prepareIndex("index").setId(id).setSource("value", i).get(); + prepareIndex("index").setId(id).setSource("value", i).get(); } refresh(); String pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); @@ -241,13 +291,13 @@ public void testIndexNotFound() { int index1 = randomIntBetween(10, 50); for (int i = 0; i < index1; i++) { String id = Integer.toString(i); - client().prepareIndex("index-1").setId(id).setSource("value", i).get(); + prepareIndex("index-1").setId(id).setSource("value", i).get(); } int index2 = randomIntBetween(10, 50); for (int i = 0; i < index2; i++) { String id = Integer.toString(i); - client().prepareIndex("index-2").setId(id).setSource("value", i).get(); + prepareIndex("index-2").setId(id).setSource("value", i).get(); } refresh(); String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); @@ -283,8 +333,8 @@ public void testIndexNotFound() { public void testAllowNoIndex() { var request = new OpenPointInTimeRequest("my_index").indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) .keepAlive(TimeValue.timeValueMinutes(between(1, 10))); - String pit = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet().getPointInTimeId(); - var closeResp = client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pit)).actionGet(); + String pit = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet().getPointInTimeId(); + var closeResp = client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pit)).actionGet(); assertThat(closeResp.status(), equalTo(RestStatus.OK)); } @@ -305,7 +355,7 @@ public void testCanMatch() throws Exception { } } } - client().prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); + prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); SearchResponse resp = prepareSearch().setQuery(new RangeQueryBuilder("created_date").gte("2020-01-02").lte("2020-01-03")) .setSearchType(SearchType.QUERY_THEN_FETCH) .setPreference(null) @@ -356,11 +406,11 @@ public void testPartialResults() throws Exception { int numDocs1 = randomIntBetween(10, 50); for (int i = 0; i < numDocs1; i++) { - client().prepareIndex(randomFrom("test-1")).setId(Integer.toString(i)).setSource("value", i).get(); + prepareIndex(randomFrom("test-1")).setId(Integer.toString(i)).setSource("value", i).get(); } int 
numDocs2 = randomIntBetween(10, 50); for (int i = 0; i < numDocs2; i++) { - client().prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); + prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); String pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); @@ -392,7 +442,7 @@ public void testPITTiebreak() throws Exception { createIndex(index, Settings.builder().put("index.number_of_shards", 1).build()); int numDocs = randomIntBetween(3, 20); for (int j = 0; j < numDocs; j++) { - client().prepareIndex(index).setSource("value", randomIntBetween(0, 2)).get(); + prepareIndex(index).setSource("value", randomIntBetween(0, 2)).get(); expectedNumDocs++; } } @@ -428,8 +478,11 @@ public void testPITTiebreak() throws Exception { } public void testCloseInvalidPointInTime() { - expectThrows(Exception.class, () -> client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest("")).actionGet()); - List tasks = clusterAdmin().prepareListTasks().setActions(ClosePointInTimeAction.NAME).get().getTasks(); + expectThrows( + Exception.class, + () -> client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest("")).actionGet() + ); + List tasks = clusterAdmin().prepareListTasks().setActions(TransportClosePointInTimeAction.TYPE.name()).get().getTasks(); assertThat(tasks, empty()); } @@ -470,7 +523,7 @@ public void testOpenPITConcurrentShardRequests() throws Exception { OpenPointInTimeRequest request = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(1)); request.maxConcurrentShardRequests(maxConcurrentRequests); PlainActionFuture future = new PlainActionFuture<>(); - client().execute(OpenPointInTimeAction.INSTANCE, request, future); + client().execute(TransportOpenPointInTimeAction.TYPE, request, future); assertTrue(sentLatch.await(1, TimeUnit.MINUTES)); readyLatch.countDown(); closePointInTime(future.actionGet().getPointInTimeId()); @@ -534,11 +587,11 @@ private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int s private String openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); - final OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } private void closePointInTime(String readerId) { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(readerId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java index df36f1babd364..227a3b8612331 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java @@ -162,7 +162,7 @@ public void onFailure(Exception e) { throw new AssertionError(); } }; - client.executeLocally(SearchAction.INSTANCE, new SearchRequest(request) { + client.executeLocally(TransportSearchAction.TYPE, new SearchRequest(request) { 
@Override public SearchTask createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { SearchTask task = super.createTask(id, type, action, parentTaskId, headers); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchShardsIT.java index 68153e5e88c44..8b1acf11a7a5d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchShardsIT.java @@ -47,7 +47,7 @@ public void testBasic() { ); int numDocs = randomIntBetween(1, 10); for (int j = 0; j < numDocs; j++) { - client().prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); + prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); } indicesAdmin().prepareRefresh(index).get(); } @@ -70,7 +70,7 @@ public void testBasic() { randomBoolean(), randomBoolean() ? null : randomAlphaOfLength(10) ); - var resp = client().execute(SearchShardsAction.INSTANCE, request).actionGet(); + var resp = client().execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(indicesWithData + indicesWithoutData)); int skipped = 0; for (SearchShardsGroup g : resp.getGroups()) { @@ -97,7 +97,7 @@ public void testBasic() { randomBoolean(), randomBoolean() ? null : randomAlphaOfLength(10) ); - SearchShardsResponse resp = client().execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = client().execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(indicesWithData + indicesWithoutData)); for (SearchShardsGroup g : resp.getGroups()) { assertFalse(g.skipped()); @@ -115,7 +115,7 @@ public void testRandom() { ); int numDocs = randomIntBetween(10, 1000); for (int j = 0; j < numDocs; j++) { - client().prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); + prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); } indicesAdmin().prepareRefresh(index).get(); } @@ -137,7 +137,7 @@ public void testRandom() { randomBoolean(), randomBoolean() ? 
null : randomAlphaOfLength(10) ); - var searchShardsResponse = client().execute(SearchShardsAction.INSTANCE, searchShardsRequest).actionGet(); + var searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); assertThat(searchShardsResponse.getGroups(), hasSize(searchResponse.getTotalShards())); long skippedShards = searchShardsResponse.getGroups().stream().filter(SearchShardsGroup::skipped).count(); @@ -169,7 +169,7 @@ public void testNoCanMatchWithoutQuery() { totalShards += numShards; int numDocs = randomIntBetween(10, 100); for (int j = 0; j < numDocs; j++) { - client().prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); + prepareIndex(index).setSource("value", i).setId(Integer.toString(i)).get(); } indicesAdmin().prepareRefresh(index).get(); } @@ -182,7 +182,7 @@ public void testNoCanMatchWithoutQuery() { randomBoolean(), null ); - SearchShardsResponse resp = client().execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = client().execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(totalShards)); for (SearchShardsGroup group : resp.getGroups()) { assertFalse(group.skipped()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java index d84d4270af24c..31ffe560be010 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java @@ -419,9 +419,9 @@ public void testSearchIdle() throws Exception { } } }); - client().prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); - client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); - client().prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); + prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); + prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); + prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); assertBusy(() -> { SearchResponse resp = prepareSearch("test").setQuery(new RangeQueryBuilder("created_date").gte("2020-01-02").lte("2020-01-03")) .setPreFilterShardSize(randomIntBetween(1, 3)) @@ -546,7 +546,7 @@ private void indexSomeDocs(String indexName, int numberOfShards, int numberOfDoc createIndex(indexName, Settings.builder().put("index.number_of_shards", numberOfShards).build()); for (int i = 0; i < numberOfDocs; i++) { - DocWriteResponse indexResponse = client().prepareIndex(indexName).setSource("number", randomInt()).get(); + DocWriteResponse indexResponse = prepareIndex(indexName).setSource("number", randomInt()).get(); assertEquals(RestStatus.CREATED, indexResponse.status()); } indicesAdmin().prepareRefresh(indexName).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java index c31c9cae301eb..6737d02434c0f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java @@ -34,15 +34,13 @@ public void testReplicationWaitsForActiveShardCount() 
throws Exception { assertAcked(createIndexResponse); // indexing, by default, will work (waiting for one shard copy only) - client().prepareIndex("test").setId("1").setSource(source("1", "test"), XContentType.JSON).execute().actionGet(); + prepareIndex("test").setId("1").setSource(source("1", "test"), XContentType.JSON).get(); try { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(source("1", "test"), XContentType.JSON) .setWaitForActiveShards(2) // wait for 2 active shard copies .setTimeout(timeValueMillis(100)) - .execute() - .actionGet(); + .get(); fail("can't index, does not enough active shard copies"); } catch (UnavailableShardsException e) { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); @@ -59,29 +57,24 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForActiveShards(2) .setWaitForYellowStatus() - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); // this should work, since we now have two - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(source("1", "test"), XContentType.JSON) .setWaitForActiveShards(2) .setTimeout(timeValueSeconds(1)) - .execute() - .actionGet(); + .get(); try { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(source("1", "test"), XContentType.JSON) .setWaitForActiveShards(ActiveShardCount.ALL) .setTimeout(timeValueMillis(100)) - .execute() - .actionGet(); + .get(); fail("can't index, not enough active shard copies"); } catch (UnavailableShardsException e) { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); @@ -101,20 +94,17 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForActiveShards(3) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); // this should work, since we now have all shards started - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(source("1", "test"), XContentType.JSON) .setWaitForActiveShards(ActiveShardCount.ALL) .setTimeout(timeValueSeconds(1)) - .execute() - .actionGet(); + .get(); } private String source(String id, String nameValue) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index 3d77e7164ce76..d98de846bd9da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -70,7 +70,7 @@ public void testNoSuchDoc() throws Exception { .endObject(); assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping(mapping)); - client().prepareIndex("test").setId("666").setSource("field", "foo bar").execute().actionGet(); + prepareIndex("test").setId("666").setSource("field", "foo bar").get(); refresh(); for (int i = 0; i < 20; i++) { ActionFuture termVector = client().termVectors(new 
TermVectorsRequest(indexOrAlias(), "" + i)); @@ -97,7 +97,7 @@ public void testExistingFieldWithNoTermVectorsNoNPE() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping(mapping)); // when indexing a field that simply has a question mark, the term vectors will be null - client().prepareIndex("test").setId("0").setSource("existingfield", "?").execute().actionGet(); + prepareIndex("test").setId("0").setSource("existingfield", "?").get(); refresh(); ActionFuture termVector = client().termVectors( new TermVectorsRequest(indexOrAlias(), "0").selectedFields(new String[] { "existingfield" }) @@ -125,7 +125,7 @@ public void testExistingFieldButNotInDocNPE() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping(mapping)); // when indexing a field that simply has a question mark, the term vectors will be null - client().prepareIndex("test").setId("0").setSource("anotherexistingfield", 1).execute().actionGet(); + prepareIndex("test").setId("0").setSource("anotherexistingfield", 1).get(); refresh(); ActionFuture termVectors = client().termVectors( new TermVectorsRequest(indexOrAlias(), "0").selectedFields(randomBoolean() ? new String[] { "existingfield" } : null) @@ -163,7 +163,7 @@ public void testNotIndexedField() throws Exception { List indexBuilders = new ArrayList<>(); for (int i = 0; i < 6; i++) { - indexBuilders.add(client().prepareIndex().setIndex("test").setId(String.valueOf(i)).setSource("field" + i, i)); + indexBuilders.add(prepareIndex("test").setId(String.valueOf(i)).setSource("field" + i, i)); } indexRandom(true, indexBuilders); @@ -205,8 +205,7 @@ public void testSimpleTermVectors() throws IOException { ) ); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("field", "the quick brown fox jumps over the lazy dog") @@ -214,8 +213,7 @@ public void testSimpleTermVectors() throws IOException { // 31the34 35lazy39 40dog43 .endObject() ) - .execute() - .actionGet(); + .get(); refresh(); } for (int i = 0; i < 10; i++) { @@ -224,7 +222,7 @@ public void testSimpleTermVectors() throws IOException { .setOffsets(true) .setPositions(true) .setSelectedFields(); - TermVectorsResponse response = resp.execute().actionGet(); + TermVectorsResponse response = resp.get(); assertThat(response.getIndex(), equalTo("test")); assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true)); Fields fields = response.getFields(); @@ -308,8 +306,7 @@ public void testRandomSingleTermVectors() throws IOException { ) ); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("field", "the quick brown fox jumps over the lazy dog") @@ -317,8 +314,7 @@ public void testRandomSingleTermVectors() throws IOException { // 31the34 35lazy39 40dog43 .endObject() ) - .execute() - .actionGet(); + .get(); refresh(); } String[] values = { "brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the" }; @@ -335,7 +331,7 @@ public void testRandomSingleTermVectors() throws IOException { .setOffsets(isOffsetRequested) .setPositions(isPositionsRequested) .setSelectedFields(); - TermVectorsResponse response = resp.execute().actionGet(); + TermVectorsResponse response = resp.get(); assertThat(infoString + "doc id: " + i + " doesn't exists but should", 
response.isExists(), equalTo(true)); Fields fields = response.getFields(); assertThat(fields.size(), equalTo(ft.storeTermVectors() ? 1 : 0)); @@ -470,7 +466,7 @@ public void testSimpleTermVectorsWithGenerate() throws IOException { ensureGreen(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource(source).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource(source).get(); refresh(); } @@ -480,8 +476,7 @@ public void testSimpleTermVectorsWithGenerate() throws IOException { .setOffsets(true) .setPositions(true) .setSelectedFields(fieldNames) - .execute() - .actionGet(); + .get(); assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true)); Fields fields = response.getFields(); assertThat(fields.size(), equalTo(fieldNames.length)); @@ -551,7 +546,7 @@ public void testDuelWithAndWithoutTermVectors() throws IOException, ExecutionExc List indexBuilders = new ArrayList<>(); for (String indexName : indexNames) { for (int id = 0; id < content.length; id++) { - indexBuilders.add(client().prepareIndex().setIndex(indexName).setId(String.valueOf(id)).setSource("field1", content[id])); + indexBuilders.add(prepareIndex(indexName).setId(String.valueOf(id)).setSource("field1", content[id])); } } indexRandom(true, indexBuilders); @@ -628,7 +623,7 @@ public void testSimpleWildCards() throws IOException { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping(mapping)); ensureGreen(); - client().prepareIndex("test").setId("0").setSource(source).get(); + prepareIndex("test").setId("0").setSource(source).get(); refresh(); TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), "0").setSelectedFields("field*").get(); @@ -652,7 +647,7 @@ public void testArtificialVsExisting() throws ExecutionException, InterruptedExc List indexBuilders = new ArrayList<>(); for (int i = 0; i < content.length; i++) { - indexBuilders.add(client().prepareIndex().setIndex("test").setId(String.valueOf(i)).setSource("field1", content[i])); + indexBuilders.add(prepareIndex("test").setId(String.valueOf(i)).setSource("field1", content[i])); } indexRandom(true, indexBuilders); @@ -740,7 +735,7 @@ public void testPerFieldAnalyzer() throws IOException { ensureGreen(); // index a single document with prepared source - client().prepareIndex("test").setId("0").setSource(source).get(); + prepareIndex("test").setId("0").setSource(source).get(); refresh(); // create random per_field_analyzer and selected fields @@ -814,7 +809,7 @@ public void testTermVectorsWithVersion() { assertThat(response.isExists(), equalTo(false)); logger.info("--> index doc 1"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); // From translog: @@ -860,7 +855,7 @@ public void testTermVectorsWithVersion() { } logger.info("--> index doc 1 again, so increasing the version"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); // From translog: @@ -923,7 +918,7 @@ public void testFilterLength() throws ExecutionException, InterruptedException, } tags.add(tag); } - indexRandom(true, client().prepareIndex("test").setId("1").setSource("tags", tags)); + indexRandom(true, prepareIndex("test").setId("1").setSource("tags", tags)); logger.info("Checking best tags by 
longest to shortest size ..."); TermVectorsRequest.FilterSettings filterSettings = new TermVectorsRequest.FilterSettings(); @@ -959,7 +954,7 @@ public void testFilterTermFreq() throws ExecutionException, InterruptedException } uniqueTags.add(tag); } - indexRandom(true, client().prepareIndex("test").setId("1").setSource("tags", tags)); + indexRandom(true, prepareIndex("test").setId("1").setSource("tags", tags)); logger.info("Checking best tags by highest to lowest term freq ..."); TermVectorsRequest.FilterSettings filterSettings = new TermVectorsRequest.FilterSettings(); @@ -990,7 +985,7 @@ public void testFilterDocFreq() throws ExecutionException, InterruptedException, List tags = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { tags.add("tag_" + i); - builders.add(client().prepareIndex("test").setId(i + "").setSource("tags", tags)); + builders.add(prepareIndex("test").setId(i + "").setSource("tags", tags)); } indexRandom(true, builders); @@ -1016,7 +1011,7 @@ public void testArtificialDocWithPreference() throws InterruptedException, IOExc ensureGreen(); // index document - indexRandom(true, client().prepareIndex("test").setId("1").setSource("field1", "random permutation")); + indexRandom(true, prepareIndex("test").setId("1").setSource("field1", "random permutation")); // Get search shards ClusterSearchShardsResponse searchShardsResponse = clusterAdmin().prepareSearchShards("test").get(); @@ -1052,7 +1047,7 @@ public void testTermVectorsWithIgnoredField() throws IOException, InterruptedExc ensureGreen(); // add a doc with a bad long field - indexRandom(true, client().prepareIndex("index").setId("1").setSource("{\"field\":\"foo\"}", XContentType.JSON)); + indexRandom(true, prepareIndex("index").setId("1").setSource("{\"field\":\"foo\"}", XContentType.JSON)); // do a tv request for all fields, _ignored should be returned TermVectorsResponse resp = client().prepareTermVectors("index", "1").setSelectedFields("*").get(); @@ -1089,10 +1084,7 @@ public void testWithKeywordAndNormalizer() throws IOException, ExecutionExceptio for (String indexName : indexNames) { for (int id = 0; id < content.length; id++) { indexBuilders.add( - client().prepareIndex() - .setIndex(indexName) - .setId(String.valueOf(id)) - .setSource("field1", content[id], "field2", content[id]) + prepareIndex(indexName).setId(String.valueOf(id)).setSource("field1", content[id], "field2", content[id]) ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java index 875b29a3de771..9beffa7f964d9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java @@ -69,7 +69,7 @@ public void testMissingIndexThrowsMissingIndex() throws Exception { TermVectorsRequestBuilder requestBuilder = client().prepareTermVectors("testX", Integer.toString(1)); MultiTermVectorsRequestBuilder mtvBuilder = client().prepareMultiTermVectors(); mtvBuilder.add(requestBuilder.request()); - MultiTermVectorsResponse response = mtvBuilder.execute().actionGet(); + MultiTermVectorsResponse response = mtvBuilder.get(); assertThat(response.getResponses().length, equalTo(1)); assertThat(response.getResponses()[0].getFailure().getCause(), instanceOf(IndexNotFoundException.class)); assertThat(response.getResponses()[0].getFailure().getCause().getMessage(), 
equalTo("no such index [testX]")); @@ -84,7 +84,7 @@ public void testMultiTermVectorsWithVersion() throws Exception { assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false)); for (int i = 0; i < 3; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } // Version from translog @@ -133,7 +133,7 @@ public void testMultiTermVectorsWithVersion() throws Exception { assertThat(response.getResponses()[2].getFailure().getCause().getCause(), instanceOf(VersionConflictEngineException.class)); for (int i = 0; i < 3; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } // Version from translog diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java index c1ca4c60f176e..8ede5dc5ef29f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -64,7 +63,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyArray; @@ -269,66 +269,89 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { ).actionGet(); logger.info("--> checking single filtering alias search"); - SearchResponse searchResponse = prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1"); + assertResponse( + prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1") + ); logger.info("--> checking single filtering alias wildcard search"); - searchResponse = prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1"); + assertResponse( + prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1") + ); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3"); + assertResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + ); 
logger.info("--> checking single filtering alias search with sort"); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_index", SortOrder.ASC).get(); - assertHits(searchResponse.getHits(), "1", "2", "3"); + assertResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_index", SortOrder.ASC), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + ); logger.info("--> checking single filtering alias search with global facets"); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) - .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name"))) - .get(); - assertNoFailures(searchResponse); - Global global = searchResponse.getAggregations().get("global"); - Terms terms = global.getAggregations().get("test"); - assertThat(terms.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name"))), + searchResponse -> { + Global global = searchResponse.getAggregations().get("global"); + Terms terms = global.getAggregations().get("test"); + assertThat(terms.getBuckets().size(), equalTo(4)); + } + ); logger.info("--> checking single filtering alias search with global facets and sort"); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) - .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name"))) - .addSort("_index", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - global = searchResponse.getAggregations().get("global"); - terms = global.getAggregations().get("test"); - assertThat(terms.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name"))) + .addSort("_index", SortOrder.ASC), + searchResponse -> { + Global global = searchResponse.getAggregations().get("global"); + Terms terms = global.getAggregations().get("test"); + assertThat(terms.getBuckets().size(), equalTo(4)); + } + ); logger.info("--> checking single filtering alias search with non-global facets"); - searchResponse = prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) - .addAggregation(AggregationBuilders.terms("test").field("name")) - .addSort("_index", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - terms = searchResponse.getAggregations().get("test"); - assertThat(terms.getBuckets().size(), equalTo(2)); - - searchResponse = prepareSearch("foos", "bars").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2"); + assertNoFailuresAndResponse( + prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar")) + .addAggregation(AggregationBuilders.terms("test").field("name")) + .addSort("_index", SortOrder.ASC), + searchResponse -> { + Terms terms = searchResponse.getAggregations().get("test"); + assertThat(terms.getBuckets().size(), equalTo(2)); + } + ); + assertResponse( + prepareSearch("foos", "bars").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2") + ); logger.info("--> checking single non-filtering alias search"); - 
searchResponse = prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4"); + assertResponse( + prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + ); logger.info("--> checking non-filtering alias and filtering alias search"); - searchResponse = prepareSearch("alias1", "foos").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4"); + assertResponse( + prepareSearch("alias1", "foos").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + ); logger.info("--> checking index and filtering alias search"); - searchResponse = prepareSearch("test", "foos").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4"); + assertResponse( + prepareSearch("test", "foos").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + ); logger.info("--> checking index and alias wildcard search"); - searchResponse = prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4"); + assertResponse( + prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + ); } public void testSearchingFilteringAliasesTwoIndices() throws Exception { @@ -373,55 +396,63 @@ public void testSearchingFilteringAliasesTwoIndices() throws Exception { refresh(); logger.info("--> checking filtering alias for two indices"); - SearchResponse searchResponse = prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "5"); - assertThat( - prepareSearch("foos").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(2L) + assertResponse( + prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "5") + ); + assertResponse( + prepareSearch("foos").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)) ); logger.info("--> checking filtering alias for one index"); - searchResponse = prepareSearch("bars").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "2"); - assertThat( - prepareSearch("bars").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(1L) + assertResponse( + prepareSearch("bars").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "2") + ); + assertResponse( + prepareSearch("bars").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)) ); logger.info("--> checking filtering alias for two indices and one complete index"); - searchResponse = prepareSearch("foos", "test1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5"); - assertThat( - prepareSearch("foos", "test1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(5L) + assertResponse( + prepareSearch("foos", "test1").setQuery(QueryBuilders.matchAllQuery()), + 
searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5") + ); + assertResponse( + prepareSearch("foos", "test1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)) ); logger.info("--> checking filtering alias for two indices and non-filtering alias for one index"); - searchResponse = prepareSearch("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5"); - assertThat( - prepareSearch("foos", "aliasToTest1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(5L) + assertResponse( + prepareSearch("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5") + ); + assertResponse( + prepareSearch("foos", "aliasToTest1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)) ); logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices"); - searchResponse = prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(8L)); - assertThat( - prepareSearch("foos", "aliasToTests").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(8L) + assertResponse( + prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(8L)) + ); + assertResponse( + prepareSearch("foos", "aliasToTests").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(8L)) ); logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices"); - searchResponse = prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")).get(); - assertHits(searchResponse.getHits(), "4", "8"); - assertThat( - prepareSearch("foos", "aliasToTests").setSize(0) - .setQuery(QueryBuilders.termQuery("name", "something")) - .get() - .getHits() - .getTotalHits().value, - equalTo(2L) + assertResponse( + prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")), + searchResponse -> assertHits(searchResponse.getHits(), "4", "8") + ); + assertResponse( + prepareSearch("foos", "aliasToTests").setSize(0).setQuery(QueryBuilders.termQuery("name", "something")), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)) ); } @@ -477,58 +508,58 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { refresh(); logger.info("--> checking filtering alias for multiple indices"); - SearchResponse searchResponse = prepareSearch("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "21", "31", "13", "33"); - assertThat( - prepareSearch("filter23", "filter13").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(4L) + assertResponse( + prepareSearch("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "21", "31", "13", "33") + ); + assertResponse( + prepareSearch("filter23", 
"filter13").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(4L)) ); - searchResponse = prepareSearch("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "21", "31", "11", "12", "13"); - assertThat( - prepareSearch("filter23", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(5L) + assertResponse( + prepareSearch("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "21", "31", "11", "12", "13") + ); + assertResponse( + prepareSearch("filter23", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)) ); - searchResponse = prepareSearch("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "11", "12", "13", "33"); - assertThat( - prepareSearch("filter13", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(4L) + assertResponse( + prepareSearch("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "33") + ); + assertResponse( + prepareSearch("filter13", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(4L)) ); - searchResponse = prepareSearch("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "11", "12", "13", "21", "31", "33"); - assertThat( - prepareSearch("filter13", "filter1", "filter23").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .get() - .getHits() - .getTotalHits().value, - equalTo(6L) + assertResponse( + prepareSearch("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "21", "31", "33") + ); + assertResponse( + prepareSearch("filter13", "filter1", "filter23").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(6L)) ); - searchResponse = prepareSearch("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "21", "22", "23", "31", "13", "33"); - assertThat( - prepareSearch("filter23", "filter13", "test2").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .get() - .getHits() - .getTotalHits().value, - equalTo(6L) + assertResponse( + prepareSearch("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "21", "22", "23", "31", "13", "33") + ); + assertResponse( + prepareSearch("filter23", "filter13", "test2").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(6L)) ); - searchResponse = prepareSearch("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "11", "12", "13", "21", "22", "23", "31", "33"); - assertThat( - prepareSearch("filter23", "filter13", "test1", "test2").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .get() - .getHits() - .getTotalHits().value, - 
equalTo(8L) + assertResponse( + prepareSearch("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "21", "22", "23", "31", "33") + ); + assertResponse( + prepareSearch("filter23", "filter13", "test1", "test2").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(8L)) ); } @@ -581,9 +612,9 @@ public void testDeletingByQueryFilteringAliases() throws Exception { refresh(); logger.info("--> checking counts before delete"); - assertThat( - prepareSearch("bars").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, - equalTo(1L) + assertResponse( + prepareSearch("bars").setSize(0).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)) ); } @@ -646,7 +677,7 @@ public void testDeleteAliases() throws Exception { assertFalse(indicesAdmin().prepareGetAliases("foo").setIndices("bar_bar").get().getAliases().isEmpty()); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")).execute().actionGet() + () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")).get() ); assertEquals( "The provided expression [foo] matches an alias, specify the corresponding concrete indices instead.", @@ -1059,13 +1090,13 @@ public void testAliasesCanBeAddedToIndicesOnly() throws Exception { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("week_20").alias("tmp")).execute().actionGet() + () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("week_20").alias("tmp")).get() ); assertEquals( "The provided expression [week_20] matches an alias, specify the corresponding concrete indices instead.", iae.getMessage() ); - assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("2017-05-20").alias("tmp")).execute().get()); + assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("2017-05-20").alias("tmp"))); } // Before 2.0 alias filters were parsed at alias creation time, in order @@ -1106,7 +1137,7 @@ public void testAliasFilterWithNowInRangeFilterAndQuery() { final int numDocs = scaledRandomIntBetween(5, 52); for (int i = 1; i <= numDocs; i++) { - client().prepareIndex("my-index").setSource("timestamp", "2016-12-12").get(); + prepareIndex("my-index").setSource("timestamp", "2016-12-12").get(); if (i % 2 == 0) { refresh(); assertHitCount(prepareSearch("filter1"), i); @@ -1128,7 +1159,7 @@ public void testAliasesWithBlocks() { () -> assertAcked(indicesAdmin().prepareAliases().addAlias("test", "alias1").addAlias("test", "alias2")) ); assertAliasesVersionIncreases("test", () -> assertAcked(indicesAdmin().prepareAliases().removeAlias("test", "alias1"))); - assertThat(indicesAdmin().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1)); + assertThat(indicesAdmin().prepareGetAliases("alias2").get().getAliases().get("test").size(), equalTo(1)); assertFalse(indicesAdmin().prepareGetAliases("alias2").get().getAliases().isEmpty()); } finally { disableIndexBlock("test", block); @@ -1146,7 +1177,7 @@ public void testAliasesWithBlocks() { "test", () -> 
assertBlocked(indicesAdmin().prepareAliases().removeAlias("test", "alias2"), INDEX_READ_ONLY_BLOCK) ); - assertThat(indicesAdmin().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1)); + assertThat(indicesAdmin().prepareGetAliases("alias2").get().getAliases().get("test").size(), equalTo(1)); assertFalse(indicesAdmin().prepareGetAliases("alias2").get().getAliases().isEmpty()); } finally { @@ -1182,7 +1213,7 @@ public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionE IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareAliases().removeIndex("foo").execute().actionGet() + () -> indicesAdmin().prepareAliases().removeIndex("foo").get() ); assertEquals( "The provided expression [foo] matches an alias, specify the corresponding concrete indices instead.", @@ -1202,7 +1233,7 @@ public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionE public void testRemoveIndexAndReplaceWithAlias() throws InterruptedException, ExecutionException { assertAcked(indicesAdmin().prepareCreate("test")); - indexRandom(true, client().prepareIndex("test_2").setId("test").setSource("test", "test")); + indexRandom(true, prepareIndex("test_2").setId("test").setSource("test", "test")); assertAliasesVersionIncreases( "test_2", () -> assertAcked(indicesAdmin().prepareAliases().addAlias("test_2", "test").removeIndex("test")) @@ -1299,29 +1330,36 @@ public void testIndexingAndQueryingHiddenAliases() throws Exception { refresh(writeIndex, nonWriteIndex); // Make sure that the doc written to the alias made it - SearchResponse searchResponse = prepareSearch(writeIndex).setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "2", "3"); + assertResponse( + prepareSearch(writeIndex).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "2", "3") + ); // Ensure that all docs can be gotten through the alias - searchResponse = prepareSearch(alias).setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "1", "2", "3"); + assertResponse( + prepareSearch(alias).setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + ); // And querying using a wildcard with indices options set to expand hidden - searchResponse = prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()) - .setIndicesOptions(IndicesOptions.fromOptions(false, false, true, false, true, true, true, false, false)) - .get(); - assertHits(searchResponse.getHits(), "1", "2", "3"); + assertResponse( + prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()) + .setIndicesOptions(IndicesOptions.fromOptions(false, false, true, false, true, true, true, false, false)), + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + ); // And that querying the alias with a wildcard and no expand options fails - searchResponse = prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()).get(); - assertThat(searchResponse.getHits().getHits(), emptyArray()); + assertResponse( + prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()), + searchResponse -> assertThat(searchResponse.getHits().getHits(), emptyArray()) + ); } public void testCreateIndexAndAliasWithSameNameFails() { final String indexName = "index-name"; final IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareCreate(indexName).addAlias(new 
Alias(indexName)).execute().actionGet() + () -> indicesAdmin().prepareCreate(indexName).addAlias(new Alias(indexName)).get() ); assertEquals("alias name [" + indexName + "] self-conflicts with index name", iae.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java index 9b78bb9369fd7..6206f2357218c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java @@ -107,7 +107,7 @@ public void testIndexReadWriteMetadataBlocks() { private void canCreateIndex(String index) { try { - CreateIndexResponse r = indicesAdmin().prepareCreate(index).execute().actionGet(); + CreateIndexResponse r = indicesAdmin().prepareCreate(index).get(); assertThat(r, notNullValue()); } catch (ClusterBlockException e) { fail(); @@ -116,7 +116,7 @@ private void canCreateIndex(String index) { private void canNotCreateIndex(String index) { try { - indicesAdmin().prepareCreate(index).execute().actionGet(); + indicesAdmin().prepareCreate(index).get(); fail(); } catch (ClusterBlockException e) { // all is well @@ -125,9 +125,9 @@ private void canNotCreateIndex(String index) { private void canIndexDocument(String index) { try { - IndexRequestBuilder builder = client().prepareIndex(index); + IndexRequestBuilder builder = prepareIndex(index); builder.setSource("foo", "bar"); - DocWriteResponse r = builder.execute().actionGet(); + DocWriteResponse r = builder.get(); assertThat(r, notNullValue()); } catch (ClusterBlockException e) { fail(); @@ -136,9 +136,9 @@ private void canIndexDocument(String index) { private void canNotIndexDocument(String index) { try { - IndexRequestBuilder builder = client().prepareIndex(index); + IndexRequestBuilder builder = prepareIndex(index); builder.setSource("foo", "bar"); - builder.execute().actionGet(); + builder.get(); fail(); } catch (ClusterBlockException e) { // all is well @@ -250,9 +250,7 @@ public void testAddIndexBlock() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) - .collect(toList()) + IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); final APIBlock block = randomAddableBlock(); @@ -278,7 +276,7 @@ public void testSameBlockTwice() throws Exception { false, randomBoolean(), IntStream.range(0, randomIntBetween(1, 10)) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) + .mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) .collect(toList()) ); } @@ -323,9 +321,7 @@ public void testConcurrentAddBlock() throws InterruptedException { randomBoolean(), false, randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) - .collect(toList()) + IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); ensureYellowAndNoInitializingShards(indexName); @@ -404,7 +400,7 @@ public void testAddBlockWhileDeletingIndices() throws Exception { false, randomBoolean(), IntStream.range(0, 10) - .mapToObj(n -> client().prepareIndex(indexName).setId(String.valueOf(n)).setSource("num", n)) + .mapToObj(n -> 
prepareIndex(indexName).setId(String.valueOf(n)).setSource("num", n)) .collect(toList()) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java index 5e8e6c634fa47..c45f980553431 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/broadcast/BroadcastActionsIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.broadcast; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -18,6 +17,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; public class BroadcastActionsIT extends ESIntegTestCase { @@ -42,11 +42,12 @@ public void testBroadcastOperations() throws IOException { // check count for (int i = 0; i < 5; i++) { // test successful - SearchResponse countResponse = prepareSearch("test").setSize(0).setQuery(matchAllQuery()).get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); - assertThat(countResponse.getTotalShards(), equalTo(numShards.numPrimaries)); - assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); - assertThat(countResponse.getFailedShards(), equalTo(0)); + assertResponse(prepareSearch("test").setSize(0).setQuery(matchAllQuery()), countResponse -> { + assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L)); + assertThat(countResponse.getTotalShards(), equalTo(numShards.numPrimaries)); + assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); + assertThat(countResponse.getFailedShards(), equalTo(0)); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java index a6609e70f963d..214e3f73144d9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -56,17 +56,13 @@ public void testSimpleLocalHealth() { public void testHealth() { logger.info("--> running cluster health on an index that does not exists"); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("test1") - .setWaitForYellowStatus() - .setTimeout("1s") - .execute() - .actionGet(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("test1").setWaitForYellowStatus().setTimeout("1s").get(); assertThat(healthResponse.isTimedOut(), equalTo(true)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse.getIndices().isEmpty(), equalTo(true)); logger.info("--> running cluster wide health"); - healthResponse = clusterAdmin().prepareHealth().setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); + healthResponse = clusterAdmin().prepareHealth().setWaitForGreenStatus().setTimeout("10s").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getStatus(), 
equalTo(ClusterHealthStatus.GREEN)); assertThat(healthResponse.getIndices().isEmpty(), equalTo(true)); @@ -75,13 +71,13 @@ public void testHealth() { createIndex("test1"); logger.info("--> running cluster health on an index that does exists"); - healthResponse = clusterAdmin().prepareHealth("test1").setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); + healthResponse = clusterAdmin().prepareHealth("test1").setWaitForGreenStatus().setTimeout("10s").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); logger.info("--> running cluster health on an index that does exists and an index that doesn't exists"); - healthResponse = clusterAdmin().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); + healthResponse = clusterAdmin().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").get(); assertThat(healthResponse.isTimedOut(), equalTo(true)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index c437f2b5a4c8c..746ddc56870ae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -67,7 +67,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { String node1Name = internalCluster().startNode(settings); logger.info("--> should be blocked, no master..."); - ClusterState state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); assertThat(state.nodes().getSize(), equalTo(1)); // verify that we still see the local node in the cluster state @@ -77,16 +77,15 @@ public void testTwoNodesNoMasterBlock() throws Exception { ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(false)); @@ -94,11 +93,11 @@ public void testTwoNodesNoMasterBlock() throws Exception { NumShards numShards = getNumShards("test"); logger.info("--> 
indexing some data"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } // make sure that all shards recovered before trying to flush assertThat( - clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().getActiveShards(), + clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().getActiveShards(), equalTo(numShards.totalNumShards) ); // flush for simpler debugging @@ -107,7 +106,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> verify we get the data back"); for (int i = 0; i < 10; i++) { assertThat( - prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, + prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(100L) ); } @@ -121,11 +120,11 @@ public void testTwoNodesNoMasterBlock() throws Exception { internalCluster().stopNode(masterNode); assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState clusterState = clusterAdmin().prepareState().setLocal(true).get().getState(); assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); - state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); // verify that both nodes are still in the cluster state but there is no master assertThat(state.nodes().getSize(), equalTo(2)); @@ -138,16 +137,15 @@ public void testTwoNodesNoMasterBlock() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(true)); @@ -172,7 +170,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { internalCluster().stopNode(otherNode); assertBusy(() -> { - ClusterState state1 = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState state1 = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); @@ -184,16 +182,15 @@ public void testTwoNodesNoMasterBlock() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .setWaitForGreenStatus() 
- .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(true)); @@ -218,7 +215,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { assertBusy(() -> { for (Client client : clients()) { - ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).get().getState(); assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); } }); @@ -230,27 +227,26 @@ public void testThreeNodesNoMasterBlock() throws Exception { ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("3") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.nodes().getSize(), equalTo(3)); createIndex("test"); NumShards numShards = getNumShards("test"); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } ensureGreen(); // make sure that all shards recovered before trying to flush assertThat( - clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(), + clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().isTimedOut(), equalTo(false) ); // flush for simpler debugging - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); refresh(); logger.info("--> verify we get the data back"); @@ -269,7 +265,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { logger.info("--> verify that there is no master anymore on remaining node"); // spin here to wait till the state is set assertBusy(() -> { - ClusterState st = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState st = clusterAdmin().prepareState().setLocal(true).get().getState(); assertThat(st.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); @@ -279,7 +275,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { internalCluster().validateClusterFormed(); ensureGreen(); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.nodes().getSize(), equalTo(3)); logger.info("--> 
verify we the data back"); @@ -346,8 +342,7 @@ public void onFailure(Exception e) { .admin() .cluster() .prepareState() - .execute() - .actionGet() + .get() .getState() .nodes() .getMasterNode(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index c273a0b0f7c6b..23c13a3dbf579 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.Requests; @@ -45,6 +44,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -72,7 +72,7 @@ public void testNoMasterActions() throws Exception { final List<String> nodes = internalCluster().startNodes(3, settings); createIndex("test"); - clusterAdmin().prepareHealth("test").setWaitForGreenStatus().execute().actionGet(); + clusterAdmin().prepareHealth("test").setWaitForGreenStatus().get(); final NetworkDisruption disruptionScheme = new NetworkDisruption( new IsolateAllNodes(new HashSet<>(nodes)), @@ -84,7 +84,7 @@ public void testNoMasterActions() throws Exception { final Client clientToMasterlessNode = client(); assertBusy(() -> { - ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -224,8 +224,8 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { prepareCreate("test1").setSettings(indexSettings(1, 2)).get(); prepareCreate("test2").setSettings(indexSettings(3, 0)).get(); clusterAdmin().prepareHealth("_all").setWaitForGreenStatus().get(); - client().prepareIndex("test1").setId("1").setSource("field", "value1").get(); - client().prepareIndex("test2").setId("1").setSource("field", "value1").get(); + prepareIndex("test1").setId("1").setSource("field", "value1").get(); + prepareIndex("test2").setId("1").setSource("field", "value1").get(); refresh(); ensureSearchable("test1", "test2"); @@ -255,9 +255,10 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { logger.info("--> here 3"); assertHitCount(clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true), 1L); - SearchResponse countResponse = clientToMasterlessNode.prepareSearch("test2").setAllowPartialSearchResults(true).setSize(0).get(); - assertThat(countResponse.getTotalShards(), equalTo(3)); - assertThat(countResponse.getSuccessfulShards(), equalTo(1)); + 
assertResponse(clientToMasterlessNode.prepareSearch("test2").setAllowPartialSearchResults(true).setSize(0), countResponse -> { + assertThat(countResponse.getTotalShards(), equalTo(3)); + assertThat(countResponse.getSuccessfulShards(), equalTo(1)); + }); TimeValue timeout = TimeValue.timeValueMillis(200); long now = System.currentTimeMillis(); @@ -299,7 +300,7 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { prepareCreate("test1").setSettings(indexSettings(1, 1)).get(); clusterAdmin().prepareHealth("_all").setWaitForGreenStatus().get(); - client().prepareIndex("test1").setId("1").setSource("field", "value1").get(); + prepareIndex("test1").setId("1").setSource("field", "value1").get(); refresh(); ensureGreen("test1"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java index 11b3027c23550..f53e559bfda5d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java @@ -205,8 +205,7 @@ private void ensureRed(String indexName) throws Exception { ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(indexName) .setWaitForStatus(ClusterHealthStatus.RED) .setWaitForEvents(Priority.LANGUID) - .execute() - .actionGet(); + .get(); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java index 8618104fadc26..770ca21fd6898 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java @@ -46,13 +46,7 @@ public void testIndexingBeforeAndAfterDataNodesStart() { internalCluster().startNode(nonDataNode()); assertThat( - clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setWaitForNodes("2") - .setLocal(true) - .execute() - .actionGet() - .isTimedOut(), + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).get().isTimedOut(), equalTo(false) ); @@ -67,13 +61,7 @@ public void testIndexingBeforeAndAfterDataNodesStart() { // now, start a node data, and see that it gets with shards internalCluster().startNode(dataNode()); assertThat( - clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setWaitForNodes("3") - .setLocal(true) - .execute() - .actionGet() - .isTimedOut(), + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).get().isTimedOut(), equalTo(false) ); @@ -87,10 +75,7 @@ public void testShardsAllocatedAfterDataNodesStart() { new CreateIndexRequest("test").settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) .waitForActiveShards(ActiveShardCount.NONE) ).actionGet(); - final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .execute() - .actionGet(); + final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); assertThat(healthResponse1.isTimedOut(), equalTo(false)); assertThat(healthResponse1.getStatus(), equalTo(ClusterHealthStatus.RED)); 
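The dominant pattern in the hunks above replaces "SearchResponse resp = builder.get(); assertThat(resp...)" with the assertResponse(...) helper imported from org.elasticsearch.test.hamcrest.ElasticsearchAssertions, so that the response is reliably released once the assertions have run. A minimal sketch of the idiom, with a simplified signature and a stand-in for the real ref-counting interface (both are assumptions, not the library's exact types):

import java.util.function.Consumer;
import java.util.function.Supplier;

final class AssertResponseSketch {
    // Stand-in for org.elasticsearch.core.RefCounted (assumed shape).
    interface RefCounted {
        void decRef();
    }

    // Run the request, hand the live response to the assertions, then always release it.
    static <R extends RefCounted> void assertResponse(Supplier<R> execute, Consumer<R> assertions) {
        R response = execute.get(); // e.g. builder.get() in the tests above
        try {
            assertions.accept(response); // assertions see a retained, live response
        } finally {
            response.decRef(); // released even when an assertion fails
        }
    }
}

The try/finally shape is the point of the refactor: the older "assign then assert" style leaked the response whenever an assertion threw.
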
assertThat(healthResponse1.getActiveShards(), equalTo(0)); @@ -102,8 +87,7 @@ public void testShardsAllocatedAfterDataNodesStart() { .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .setWaitForGreenStatus() - .execute() - .actionGet() + .get() .isTimedOut(), equalTo(false) ); @@ -115,10 +99,7 @@ public void testAutoExpandReplicasAdjustedWhenDataNodeJoins() { new CreateIndexRequest("test").settings(Settings.builder().put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all")) .waitForActiveShards(ActiveShardCount.NONE) ).actionGet(); - final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .execute() - .actionGet(); + final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); assertThat(healthResponse1.isTimedOut(), equalTo(false)); assertThat(healthResponse1.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse1.getActiveShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 34682637b0632..93d714c79c391 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -35,10 +35,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); try { - assertThat( - clusterAdmin().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), - nullValue() - ); + assertThat(clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected @@ -46,29 +43,11 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start master node"); final String masterNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName) ); @@ -77,10 +56,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { internalCluster().stopCurrentMasterNode(); try { - assertThat( - clusterAdmin().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), - nullValue() - ); + assertThat(clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected @@ -91,29 +67,11 @@ public void testSimpleOnlyMasterNodeElection() 
throws IOException { Settings.builder().put(nonDataNode(masterNode())).put(masterDataPathSettings) ); assertThat( - internalCluster().nonMasterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName) ); assertThat( - internalCluster().masterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName) ); } @@ -123,10 +81,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); try { - assertThat( - clusterAdmin().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), - nullValue() - ); + assertThat(clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected @@ -134,71 +89,26 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { logger.info("--> start master node (1)"); final String masterNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName) ); logger.info("--> start master node (2)"); final String nextMasterEligableNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().nonMasterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName) ); @@ -207,57 +117,21 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { // removing the master from the voting configuration immediately 
triggers the master to step down assertBusy(() -> { assertThat( - internalCluster().nonMasterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName) ); assertThat( - internalCluster().masterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName) ); }); internalCluster().stopNode(masterNodeName); assertThat( - internalCluster().nonMasterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName) ); assertThat( - internalCluster().masterClient() - .admin() - .cluster() - .prepareState() - .execute() - .actionGet() - .getState() - .nodes() - .getMasterNode() - .getName(), + internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java index 8775d8949dc55..64ac8318dce23 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java @@ -31,25 +31,17 @@ public void testUpdateSettingsValidation() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("3") .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.totalNumShards)); setReplicaCount(0, "test"); - healthResponse = clusterAdmin().prepareHealth("test") - .setWaitForEvents(Priority.LANGUID) - .setWaitForGreenStatus() - .execute() - .actionGet(); + healthResponse = clusterAdmin().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.numPrimaries)); try { - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.refresh_interval", "")) - .execute() - .actionGet(); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "")).get(); fail(); } catch (IllegalArgumentException ex) { logger.info("Error message: [{}]", ex.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index f55ac7172266d..c4f06cc90fdf3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java 
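The other rewrite applied mechanically across these files, .execute().actionGet() becoming .get(), is behavior-preserving: on Elasticsearch's request builders, get() is shorthand for exactly that chain. A simplified sketch of the equivalence (the real class, org.elasticsearch.action.ActionRequestBuilder, carries more type bounds and state):

abstract class RequestBuilderSketch<Response> {
    // Stand-in for org.elasticsearch.action.ActionFuture (assumed shape).
    interface Future<T> {
        T actionGet(); // blocks until the response arrives or the request fails
    }

    abstract Future<Response> execute();

    final Response get() {
        return execute().actionGet(); // .get() is literally execute().actionGet()
    }
}

Because the two forms compile to the same blocking call, the hunks below can collapse multi-line builder chains without changing what any test observes.
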
@@ -81,7 +81,7 @@ public void testSimpleAwareness() throws Exception { assertThat("Cluster health request timed out", clusterHealth.isTimedOut(), equalTo(false)); logger.info("--> checking current state"); - ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); // check that closed indices are effectively closed final List<String> notClosedIndices = indicesToClose.stream() @@ -114,7 +114,7 @@ public void testAwarenessZones() { String A_1 = nodes.get(3); logger.info("--> waiting for nodes to form a cluster"); - ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForNodes("4").execute().actionGet(); + ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForNodes("4").get(); assertThat(health.isTimedOut(), equalTo(false)); createIndex("test", 5, 1); @@ -129,11 +129,10 @@ public void testAwarenessZones() { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); - ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); Map<String, Integer> counts = computeShardCounts(clusterState); assertThat(counts.get(A_1), anyOf(equalTo(2), equalTo(3))); @@ -168,10 +167,9 @@ public void testAwarenessZonesIncrementalNodes() { .setWaitForGreenStatus() .setWaitForNodes("2") .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); - ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); Map<String, Integer> counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -184,8 +182,7 @@ public void testAwarenessZonesIncrementalNodes() { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes("3") - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); clusterAdmin().prepareReroute().get(); health = clusterAdmin().prepareHealth() @@ -195,11 +192,10 @@ public void testAwarenessZonesIncrementalNodes() { .setWaitForNodes("3") .setWaitForActiveShards(10) .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + clusterState = clusterAdmin().prepareState().get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -212,8 +208,7 @@ public void testAwarenessZonesIncrementalNodes() { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes("4") - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); clusterAdmin().prepareReroute().get(); health = clusterAdmin().prepareHealth() @@ -223,11 +218,10 @@ public void testAwarenessZonesIncrementalNodes() { .setWaitForNodes("4") .setWaitForActiveShards(10) .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + clusterState = clusterAdmin().prepareState().get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -242,11 +236,10 @@ public void 
testAwarenessZonesIncrementalNodes() { .setWaitForNodes("4") .setWaitForActiveShards(10) .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + clusterState = clusterAdmin().prepareState().get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(3)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 6175395803e88..3b9d3e133b63a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -89,14 +89,13 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { indicesAdmin().prepareCreate("test") .setWaitForActiveShards(ActiveShardCount.NONE) .setSettings(Settings.builder().put("index.number_of_shards", 1)) - .execute() - .actionGet(); + .get(); if (randomBoolean()) { indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); } - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, *under dry_run*"); @@ -104,8 +103,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { .setExplain(randomBoolean()) .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)) .setDryRun(true) - .execute() - .actionGet() + .get() .getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( @@ -114,15 +112,14 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { ); logger.info("--> get the state, verify nothing changed because of the dry run"); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, actually allocating, no dry run"); state = clusterAdmin().prepareReroute() .setExplain(randomBoolean()) .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)) - .execute() - .actionGet() + .get() .getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( @@ -134,12 +131,11 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary allocated"); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), @@ -150,8 +146,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { state = clusterAdmin().prepareReroute() .setExplain(randomBoolean()) .add(new MoveAllocationCommand("test", 0, node_1, node_2)) - 
.execute() - .actionGet() + .get() .getState(); assertThat( @@ -168,12 +163,11 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary moved from node1 to node2"); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), @@ -208,7 +202,7 @@ public void testDelayWithALargeAmountOfShards() throws Exception { internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(4)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("4").execute().actionGet(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("4").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create indices"); @@ -238,30 +232,28 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc String node_1 = internalCluster().startNode(commonSettings); internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(2)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").execute().actionGet(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate"); indicesAdmin().prepareCreate("test") .setWaitForActiveShards(ActiveShardCount.NONE) .setSettings(Settings.builder().put("index.number_of_shards", 1)) - .execute() - .actionGet(); + .get(); final boolean closed = randomBoolean(); if (closed) { indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); } - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, actually allocating, no dry run"); state = clusterAdmin().prepareReroute() .setExplain(randomBoolean()) .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)) - .execute() - .actionGet() + .get() .getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( @@ -273,12 +265,11 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary allocated"); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), @@ -286,7 +277,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc ); if (closed == false) { - 
client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); } final Index index = resolveIndex("test"); @@ -306,15 +297,14 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc // TODO can we get around this? the cluster is RED, so what do we wait for? clusterAdmin().prepareReroute().get(); assertThat( - clusterAdmin().prepareHealth().setIndices("test").setWaitForNodes("2").execute().actionGet().getStatus(), + clusterAdmin().prepareHealth().setIndices("test").setWaitForNodes("2").get().getStatus(), equalTo(ClusterHealthStatus.RED) ); logger.info("--> explicitly allocate primary"); state = clusterAdmin().prepareReroute() .setExplain(randomBoolean()) .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)) - .execute() - .actionGet() + .get() .getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( @@ -325,7 +315,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc logger.info("--> get the state, verify shard 1 primary allocated"); final String nodeToCheck = node_1; assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); String nodeId = clusterState.nodes().resolveNode(nodeToCheck).getId(); assertThat(clusterState.getRoutingNodes().node(nodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); }); @@ -338,7 +328,7 @@ public void testRerouteExplain() { String node_1 = internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(1)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").execute().actionGet(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create an index with 1 shard"); @@ -355,12 +345,12 @@ public void testRerouteExplain() { logger.info("--> starting a second node"); String node_2 = internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(2)); - healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").execute().actionGet(); + healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> try to move the shard from node1 to node2"); MoveAllocationCommand cmd = new MoveAllocationCommand("test", 0, node_1, node_2); - ClusterRerouteResponse resp = clusterAdmin().prepareReroute().add(cmd).setExplain(true).execute().actionGet(); + ClusterRerouteResponse resp = clusterAdmin().prepareReroute().add(cmd).setExplain(true).get(); RoutingExplanations e = resp.getExplanations(); assertThat(e.explanations().size(), equalTo(1)); RerouteExplanation explanation = e.explanations().get(0); @@ -379,12 +369,12 @@ public void testMessageLogging() { final String nodeName1 = internalCluster().startNode(settings); assertThat(cluster().size(), equalTo(1)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").execute().actionGet(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); final String nodeName2 = 
internalCluster().startNode(settings); assertThat(cluster().size(), equalTo(2)); - healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").execute().actionGet(); + healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); final String indexName = "test_index"; @@ -395,8 +385,7 @@ public void testMessageLogging() { .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) ) - .execute() - .actionGet(); + .get(); MockLogAppender dryRunMockLog = new MockLogAppender(); dryRunMockLog.addExpectation( @@ -414,8 +403,7 @@ public void testMessageLogging() { .setExplain(randomBoolean()) .setDryRun(true) .add(dryRunAllocation) - .execute() - .actionGet(); + .get(); // during a dry run, messages exist but are not logged or exposed assertThat(dryRunResponse.getExplanations().getYesDecisionMessages(), hasSize(1)); @@ -449,8 +437,7 @@ public void testMessageLogging() { .setExplain(true) // so we get a NO decision back rather than an exception .add(yesDecisionAllocation) .add(noDecisionAllocation) - .execute() - .actionGet(); + .get(); assertThat(response.getExplanations().getYesDecisionMessages(), hasSize(1)); assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary")); @@ -472,7 +459,7 @@ public void testClusterRerouteWithBlocks() { ensureGreen("test-blocks"); logger.info("--> check that the index has 1 shard"); - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); List<ShardRouting> shards = state.routingTable().allShards("test-blocks"); assertThat(shards, hasSize(1)); @@ -506,8 +493,7 @@ public void testClusterRerouteWithBlocks() { .setIndices("test-blocks") .setWaitForYellowStatus() .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } finally { disableIndexBlock("test-blocks", blockSetting); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index f2fb19825371f..04fba1f46074f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -48,13 +48,10 @@ public void testDecommissionNodeNoReplicas() { ensureGreen("test"); logger.info("--> index some data"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat( - prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(100L) - ); + indicesAdmin().prepareRefresh().get(); + assertThat(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(100L)); final boolean closed = randomBoolean(); if (closed) { @@ -67,7 +64,7 @@ public void testDecommissionNodeNoReplicas() { ensureGreen("test"); logger.info("--> verify all are allocated on node1 now"); - ClusterState clusterState = 
clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { final IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId); @@ -81,11 +78,8 @@ public void testDecommissionNodeNoReplicas() { assertAcked(indicesAdmin().prepareOpen("test")); } - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat( - prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(100L) - ); + indicesAdmin().prepareRefresh().get(); + assertThat(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(100L)); } public void testAutoExpandReplicasToFilteredNodes() { @@ -97,7 +91,7 @@ public void testAutoExpandReplicasToFilteredNodes() { logger.info("--> creating an index with auto-expand replicas"); createIndex("test", Settings.builder().put(AutoExpandReplicas.SETTING.getKey(), "0-all").build()); - ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(1)); ensureGreen("test"); @@ -110,7 +104,7 @@ public void testAutoExpandReplicasToFilteredNodes() { ensureGreen("test"); logger.info("--> verify all are allocated on node1 now"); - clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + clusterState = clusterAdmin().prepareState().get().getState(); assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(0)); for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { @@ -135,13 +129,10 @@ public void testDisablingAllocationFiltering() { logger.info("--> index some data"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat( - prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(100L) - ); + indicesAdmin().prepareRefresh().get(); + assertThat(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(100L)); final boolean closed = randomBoolean(); if (closed) { @@ -149,7 +140,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); } - ClusterState clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState clusterState = clusterAdmin().prepareState().get().getState(); IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test"); int numShardsOnNode1 = 0; for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { @@ -172,7 +163,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); logger.info("--> verify all shards are allocated on node_1 now"); - clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + clusterState = clusterAdmin().prepareState().get().getState(); indexRoutingTable = 
clusterState.routingTable().index("test"); for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { final IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId); @@ -187,7 +178,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); logger.info("--> verify that there are shards allocated on both nodes now"); - clusterState = clusterAdmin().prepareState().execute().actionGet().getState(); + clusterState = clusterAdmin().prepareState().get().getState(); assertThat(clusterState.routingTable().index("test").numberOfNodesShardsAreAllocatedOn(), equalTo(2)); } @@ -202,8 +193,7 @@ public void testInvalidIPFilterClusterSettings() { IllegalArgumentException.class, () -> clusterAdmin().prepareUpdateSettings() .setPersistentSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1.")) - .execute() - .actionGet() + .get() ); assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java index 55814dba33562..f66430871c9d8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java @@ -33,7 +33,7 @@ public void testSaneAllocation() { } ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { if (node.isEmpty() == false) { @@ -42,7 +42,7 @@ public void testSaneAllocation() { } setReplicaCount(0, "test"); ensureGreen("test"); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { @@ -60,7 +60,7 @@ public void testSaneAllocation() { setReplicaCount(1, "test"); ensureGreen("test"); - state = clusterAdmin().prepareState().execute().actionGet().getState(); + state = clusterAdmin().prepareState().get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index 80bba57270aa5..3869952bf3b7e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -284,7 +284,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { // this request does not change the cluster state, because mapping is already created, // we don't await and cancel committed publication - ActionFuture<DocWriteResponse> docIndexResponse = client().prepareIndex("index").setId("1").setSource("field", 42).execute(); + ActionFuture<DocWriteResponse> docIndexResponse = prepareIndex("index").setId("1").setSource("field", 42).execute(); // Wait a bit to make sure that the reason why we did not get a response // is 
that cluster state processing is blocked and not just that it takes @@ -373,7 +373,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { assertEquals(minVersion, maxVersion); }); - final ActionFuture docIndexResponse = client().prepareIndex("index").setId("1").setSource("field", 42).execute(); + final ActionFuture docIndexResponse = prepareIndex("index").setId("1").setSource("field", 42).execute(); assertBusy(() -> assertTrue(client().prepareGet("index", "1").get().isExists())); @@ -383,7 +383,7 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { // this request does not change the cluster state, because the mapping is dynamic, // we need to await and cancel committed publication ActionFuture dynamicMappingsFut = executeAndCancelCommittedPublication( - client().prepareIndex("index").setId("2").setSource("field2", 42) + prepareIndex("index").setId("2").setSource("field2", 42) ); // ...and wait for second mapping to be available on master diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 265cc9ee364db..00e171a7a132a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -137,7 +137,7 @@ public void testBootstrapNotBootstrappedCluster() throws Exception { .build() ); assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -242,14 +242,7 @@ public void test3MasterNodes2Failed() throws Exception { logger.info("--> ensure NO_MASTER_BLOCK on data-only node"); assertBusy(() -> { - ClusterState state = internalCluster().client(dataNode) - .admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState(); + ClusterState state = internalCluster().client(dataNode).admin().cluster().prepareState().setLocal(true).get().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -295,14 +288,7 @@ public void test3MasterNodes2Failed() throws Exception { logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state"); assertBusy(() -> { - ClusterState state = internalCluster().client(dataNode2) - .admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState(); + ClusterState state = internalCluster().client(dataNode2).admin().cluster().prepareState().setLocal(true).get().getState(); assertFalse(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); assertTrue(state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)); }); @@ -347,7 +333,7 @@ public void testNoInitialBootstrapAfterDetach() throws Exception { .build() ); - ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).get().getState(); 
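The recurring edit in these test files swaps client().prepareIndex("index") for a bare prepareIndex("index") call. A minimal sketch of what such a convenience helper on the shared test base class presumably looks like; the helper body and class wrapper are assumptions for illustration, not copied from ESIntegTestCase:

    import org.elasticsearch.action.index.IndexRequestBuilder;
    import org.elasticsearch.test.ESIntegTestCase;

    public abstract class PrepareIndexSketch extends ESIntegTestCase {
        // Hypothetical delegate: builds an index request against the shared test
        // client with the target index pinned, so call sites shrink from
        // client().prepareIndex("idx") to prepareIndex("idx").
        protected static IndexRequestBuilder prepareIndex(String index) {
            return client().prepareIndex(index);
        }
    }
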
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
index 265cc9ee364db..00e171a7a132a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
@@ -137,7 +137,7 @@ public void testBootstrapNotBootstrappedCluster() throws Exception {
                 .build()
         );
         assertBusy(() -> {
-            ClusterState state = clusterAdmin().prepareState().setLocal(true).execute().actionGet().getState();
+            ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState();
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });
 
@@ -242,14 +242,7 @@ public void test3MasterNodes2Failed() throws Exception {
 
         logger.info("--> ensure NO_MASTER_BLOCK on data-only node");
         assertBusy(() -> {
-            ClusterState state = internalCluster().client(dataNode)
-                .admin()
-                .cluster()
-                .prepareState()
-                .setLocal(true)
-                .execute()
-                .actionGet()
-                .getState();
+            ClusterState state = internalCluster().client(dataNode).admin().cluster().prepareState().setLocal(true).get().getState();
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });
 
@@ -295,14 +288,7 @@ public void test3MasterNodes2Failed() throws Exception {
 
         logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state");
         assertBusy(() -> {
-            ClusterState state = internalCluster().client(dataNode2)
-                .admin()
-                .cluster()
-                .prepareState()
-                .setLocal(true)
-                .execute()
-                .actionGet()
-                .getState();
+            ClusterState state = internalCluster().client(dataNode2).admin().cluster().prepareState().setLocal(true).get().getState();
             assertFalse(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
             assertTrue(state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false));
         });
@@ -347,7 +333,7 @@ public void testNoInitialBootstrapAfterDetach() throws Exception {
                 .build()
         );
 
-        ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+        ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).get().getState();
         assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
 
         internalCluster().stopNode(node);
@@ -359,7 +345,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata(
         Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
         updateClusterSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb"));
 
-        ClusterState state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
+        ClusterState state = internalCluster().client().admin().cluster().prepareState().get().getState();
         assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
 
         internalCluster().stopCurrentMasterNode();
@@ -373,7 +359,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata(
         internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
         ensureGreen();
 
-        state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
+        state = internalCluster().client().admin().cluster().prepareState().get().getState();
         assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb"));
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java
new file mode 100644
index 0000000000000..59f4905d5924b
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java
@@ -0,0 +1,198 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MetadataUpdateSettingsServiceIT extends ESIntegTestCase {
+
+    public void testThatNonDynamicSettingChangesTakeEffect() throws Exception {
+        /*
+         * This test makes sure that when non-dynamic settings are updated they actually take effect (as opposed to just being set
+         * in the cluster state).
+         */
+        createIndex("test", Settings.EMPTY);
+        MetadataUpdateSettingsService metadataUpdateSettingsService = internalCluster().getCurrentMasterNodeInstance(
+            MetadataUpdateSettingsService.class
+        );
+        UpdateSettingsClusterStateUpdateRequest request = new UpdateSettingsClusterStateUpdateRequest();
+        List<Index> indices = new ArrayList<>();
+        for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) {
+            for (IndexService indexService : indicesService) {
+                indices.add(indexService.index());
+            }
+        }
+        request.indices(indices.toArray(Index.EMPTY_ARRAY));
+        request.settings(Settings.builder().put("index.codec", "FastDecompressionCompressingStoredFieldsData").build());
+
+        // First make sure it fails if reopenShards is not set on the request:
+        AtomicBoolean expectedFailureOccurred = new AtomicBoolean(false);
+        metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() {
+            @Override
+            public void onResponse(AcknowledgedResponse acknowledgedResponse) {
+                fail("Should have failed updating a non-dynamic setting without reopenShards set to true");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                expectedFailureOccurred.set(true);
+            }
+        });
+        assertBusy(() -> assertThat(expectedFailureOccurred.get(), equalTo(true)));
+
+        // Now we set reopenShards and expect it to work:
+        request.reopenShards(true);
+        AtomicBoolean success = new AtomicBoolean(false);
+        metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() {
+            @Override
+            public void onResponse(AcknowledgedResponse acknowledgedResponse) {
+                success.set(true);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                fail(e);
+            }
+        });
+        assertBusy(() -> assertThat(success.get(), equalTo(true)));
+
+        // Now we look into the IndexShard objects to make sure that the codec was actually updated (vs just the setting):
+        for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) {
+            for (IndexService indexService : indicesService) {
+                assertBusy(() -> {
+                    for (IndexShard indexShard : indexService) {
+                        final Engine engine = indexShard.getEngineOrNull();
+                        assertNotNull("engine is null for " + indexService.index().getName(), engine);
+                        assertThat(engine.getEngineConfig().getCodec().getName(), equalTo("FastDecompressionCompressingStoredFieldsData"));
+                    }
+                });
+            }
+        }
+    }
+
+    public void testThatNonDynamicSettingChangesDoNotUnncessesarilyCauseReopens() throws Exception {
+        /*
+         * This test makes sure that if a setting change request for a non-dynamic setting is made on an index that already has that
+         * value we don't unassign the shards to apply the change -- there is no need. First we set a non-dynamic setting for the
+         * first time, and see that the shards for the index are unassigned. Then we set a different dynamic setting, and include setting
+         * the original non-dynamic setting to the same value as the previous request. We make sure that the new setting comes through
+         * but that the shards are not unassigned.
+         */
+        final String indexName = "test";
+        createIndex(indexName, Settings.EMPTY);
+        MetadataUpdateSettingsService metadataUpdateSettingsService = internalCluster().getCurrentMasterNodeInstance(
+            MetadataUpdateSettingsService.class
+        );
+        UpdateSettingsClusterStateUpdateRequest request = new UpdateSettingsClusterStateUpdateRequest();
+        List<Index> indices = new ArrayList<>();
+        for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) {
+            for (IndexService indexService : indicesService) {
+                indices.add(indexService.index());
+            }
+        }
+        request.indices(indices.toArray(Index.EMPTY_ARRAY));
+        request.settings(Settings.builder().put("index.codec", "FastDecompressionCompressingStoredFieldsData").build());
+        request.reopenShards(true);
+
+        ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+        AtomicBoolean shardsUnassigned = new AtomicBoolean(false);
+        AtomicBoolean expectedSettingsChangeInClusterState = new AtomicBoolean(false);
+        AtomicReference<String> expectedSetting = new AtomicReference<>("index.codec");
+        AtomicReference<String> expectedSettingValue = new AtomicReference<>("FastDecompressionCompressingStoredFieldsData");
+        clusterService.addListener(event -> {
+            // We want the cluster change event where the setting is applied. This will be the same one where shards are unassigned
+            if (event.metadataChanged()
+                && event.state().metadata().index(indexName) != null
+                && expectedSettingValue.get().equals(event.state().metadata().index(indexName).getSettings().get(expectedSetting.get()))) {
+                expectedSettingsChangeInClusterState.set(true);
+                if (event.routingTableChanged() && event.state().routingTable().indicesRouting().containsKey(indexName)) {
+                    if (hasUnassignedShards(event.state(), indexName)) {
+                        shardsUnassigned.set(true);
+                    }
+                }
+            }
+        });
+
+        AtomicBoolean success = new AtomicBoolean(false);
+        // Make the first request, just to set things up:
+        metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() {
+            @Override
+            public void onResponse(AcknowledgedResponse acknowledgedResponse) {
+                success.set(true);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                fail(e);
+            }
+        });
+        assertBusy(() -> assertThat(success.get(), equalTo(true)));
+        assertBusy(() -> assertThat(expectedSettingsChangeInClusterState.get(), equalTo(true)));
+        assertThat(shardsUnassigned.get(), equalTo(true));
+
+        assertBusy(() -> assertThat(hasUnassignedShards(clusterService.state(), indexName), equalTo(false)));
+
+        // Same request, except now we'll also set the dynamic "index.max_result_window" setting:
+        request.settings(
+            Settings.builder()
+                .put("index.codec", "FastDecompressionCompressingStoredFieldsData")
+                .put("index.max_result_window", "1500")
+                .build()
+        );
+        success.set(false);
+        expectedSettingsChangeInClusterState.set(false);
+        shardsUnassigned.set(false);
+        expectedSetting.set("index.max_result_window");
+        expectedSettingValue.set("1500");
+        // Making this request ought to add this new setting but not unassign the shards:
+        metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() {
+            @Override
+            public void onResponse(AcknowledgedResponse acknowledgedResponse) {
+                success.set(true);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                fail(e);
+            }
+        });
+
+        assertBusy(() -> assertThat(success.get(), equalTo(true)));
+        assertBusy(() -> assertThat(expectedSettingsChangeInClusterState.get(), equalTo(true)));
+        assertThat(shardsUnassigned.get(), equalTo(false));
+
+    }
+
+    private boolean hasUnassignedShards(ClusterState state, String indexName) {
+        return state.routingTable()
+            .indicesRouting()
+            .get(indexName)
+            .allShards()
+            .anyMatch(shardRoutingTable -> shardRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size() > 0);
+    }
+}
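The new test exercises MetadataUpdateSettingsService directly rather than going through the REST layer. A condensed sketch of the call pattern it relies on, using only types and methods that appear in the diff; the listener bodies, the resolved Index value, and the codec value are illustrative placeholders:

    // Non-dynamic settings are rejected unless the request opts in to reopening shards.
    UpdateSettingsClusterStateUpdateRequest request = new UpdateSettingsClusterStateUpdateRequest();
    request.indices(new Index[] { index });                       // 'index' resolved elsewhere
    request.settings(Settings.builder().put("index.codec", "best_compression").build());
    request.reopenShards(true);                                   // without this, updateSettings reports a failure
    metadataUpdateSettingsService.updateSettings(request, ActionListener.wrap(
        acknowledged -> {},                                       // change applied; affected shards were reopened
        e -> { throw new AssertionError(e); }
    ));
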
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java
index ae8662dc5517d..012cb826a4403 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java
@@ -154,7 +154,7 @@ private int indexDocs(String indexName, Object... source) throws InterruptedExce
         final int numExtraDocs = between(10, 100);
         IndexRequestBuilder[] builders = new IndexRequestBuilder[numExtraDocs];
         for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex(indexName).setSource(source);
+            builders[i] = prepareIndex(indexName).setSource(source);
         }
 
         indexRandom(true, false, true, Arrays.asList(builders));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
index 609a47815b07e..543b0be8ae48d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
@@ -138,7 +138,7 @@ private void indexRandomData() throws Exception {
         int numDocs = scaledRandomIntBetween(100, 1000);
         IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
         for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test").setSource("field", "value");
+            builders[i] = prepareIndex("test").setSource("field", "value");
         }
         // we want to test both full divergent copies of the shard in terms of segments, and
         // a case where they are the same (using sync flush), index Random does all this goodness
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
index 2f3618f1d6aa7..b65e715b454dc 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
@@ -91,10 +91,9 @@ public void testBulkWeirdScenario() throws Exception {
         ensureGreen();
 
         BulkResponse bulkResponse = client().prepareBulk()
-            .add(client().prepareIndex().setIndex("test").setId("1").setSource("field1", "value1"))
+            .add(prepareIndex("test").setId("1").setSource("field1", "value1"))
             .add(client().prepareUpdate().setIndex("test").setId("1").setDoc("field2", "value2"))
-            .execute()
-            .actionGet();
+            .get();
 
         assertThat(bulkResponse.hasFailures(), equalTo(false));
         assertThat(bulkResponse.getItems().length, equalTo(2));
@@ -113,7 +112,7 @@ public void testBulkWeirdScenario() throws Exception {
 
     // returns data paths settings of in-sync shard copy
     private Settings createStaleReplicaScenario(String master) throws Exception {
-        client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+        prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
         refresh();
         ClusterState state = clusterAdmin().prepareState().all().get().getState();
         List<ShardRouting> shards = state.routingTable().allShards("test");
@@ -437,7 +436,7 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception {
         ensureYellow("test");
         assertEquals(2, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size());
         logger.info("--> indexing...");
-        client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+        prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
         assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size());
         internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
             @Override
@@ -464,7 +463,7 @@ public void testNotWaitForQuorumCopies() throws Exception {
         logger.info("--> creating index with 1 primary and 2 replicas");
         createIndex("test", randomIntBetween(1, 3), 2);
         ensureGreen("test");
-        client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+        prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
         logger.info("--> removing 2 nodes from cluster");
         internalCluster().stopNode(nodes.get(1));
         internalCluster().stopNode(nodes.get(2));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java
index 206b866bd4758..3418874bd5902 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java
@@ -14,10 +14,10 @@
 import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction;
-import org.elasticsearch.action.search.ClosePointInTimeAction;
 import org.elasticsearch.action.search.ClosePointInTimeRequest;
-import org.elasticsearch.action.search.OpenPointInTimeAction;
 import org.elasticsearch.action.search.OpenPointInTimeRequest;
+import org.elasticsearch.action.search.TransportClosePointInTimeAction;
+import org.elasticsearch.action.search.TransportOpenPointInTimeAction;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
@@ -377,7 +377,6 @@ private void assertEngineTypes() {
         }
     }
 
-    @AwaitsFix(bugUrl = "ES-4677")
     public void testRelocation() {
         var routingTableWatcher = new RoutingTableWatcher();
 
@@ -523,7 +522,7 @@ public void testSearchRouting() throws Exception {
                     // do nothing
                 }
             }
-            String pitId = client().execute(OpenPointInTimeAction.INSTANCE, openRequest).actionGet().getPointInTimeId();
+            String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId();
             try {
                 final var profileResults = prepareSearch().setPointInTime(new PointInTimeBuilder(pitId))
                     .setProfile(true)
@@ -534,7 +533,7 @@ public void testSearchRouting() throws Exception {
                     assertThat(profileKey, in(searchShardProfileKeys));
                 }
             } finally {
-                client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId));
+                client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId));
             }
         }
         // search-shards API
@@ -698,7 +697,7 @@ public void testRefreshFailsIfUnpromotableDisconnects() throws Exception {
             });
         }
 
-        RefreshResponse response = indicesAdmin().prepareRefresh(INDEX_NAME).execute().actionGet();
+        RefreshResponse response = indicesAdmin().prepareRefresh(INDEX_NAME).get();
         assertThat(
             "each unpromotable replica shard should be added to the shard failures",
             response.getFailedShards(),
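The ShardRoutingRoleIT hunks above swap the OpenPointInTimeAction/ClosePointInTimeAction INSTANCE singletons for the TransportOpenPointInTimeAction.TYPE and TransportClosePointInTimeAction.TYPE action types. A condensed sketch of the open/search/close lifecycle using the migrated names; the index name and keep-alive are illustrative, and the test scaffolding is elided:

    // Open a point in time, search against it, and always close it again.
    OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(1));
    String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId();
    try {
        // Searches reference the PIT id rather than naming indices directly.
        prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)).get();
    } finally {
        // Release server-side PIT resources even if the search fails.
        client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId));
    }
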
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java
index 3e38ef22834d5..31e45e64d8afe 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java
@@ -60,10 +60,7 @@ public void testFloodStageExceeded() throws Exception {
         getTestFileStore(dataNodeName).setTotalSpace(1L);
         refreshClusterInfo();
         assertBusy(() -> {
-            assertBlocked(
-                client().prepareIndex().setIndex(indexName).setId("1").setSource("f", "g"),
-                IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK
-            );
+            assertBlocked(prepareIndex(indexName).setId("1").setSource("f", "g"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
             assertThat(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE), equalTo("true"));
         });
 
@@ -115,10 +112,7 @@ public void testRemoveExistingIndexBlocksWhenDiskThresholdMonitorIsDisabled() th
         getTestFileStore(dataNodeName).setTotalSpace(1L);
         refreshClusterInfo();
         assertBusy(() -> {
-            assertBlocked(
-                client().prepareIndex().setIndex(indexName).setId("1").setSource("f", "g"),
-                IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK
-            );
+            assertBlocked(prepareIndex(indexName).setId("1").setSource("f", "g"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
             assertThat(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE), equalTo("true"));
         });
 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
index ead474e6eea24..b1ac5b02f7dd2 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
@@ -182,7 +182,7 @@ private SmallestShards createReasonableSizedShards(final String indexName) throw
         while (true) {
             final IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[scaledRandomIntBetween(100, 10000)];
             for (int i = 0; i < indexRequestBuilders.length; i++) {
-                indexRequestBuilders[i] = client().prepareIndex(indexName).setSource("field", randomAlphaOfLength(10));
+                indexRequestBuilders[i] = prepareIndex(indexName).setSource("field", randomAlphaOfLength(10));
             }
             indexRandom(true, indexRequestBuilders);
             forceMerge();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
index 965674b772998..fd5e54631fd7a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java
@@ -194,7 +194,7 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception {
             assertThat("node2 has 2 shards", shardCountByNodeId.get(nodeIds.get(2)), equalTo(2));
         }
 
-        client().prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
         assertSearchHits(prepareSearch("test"), "1");
 
         // Move all nodes above the low watermark so no shard movement can occur, and at least one node above the flood stage watermark so
@@ -208,19 +208,13 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception {
         );
 
         assertBusy(
-            () -> assertBlocked(
-                client().prepareIndex().setIndex("test").setId("1").setSource("foo", "bar"),
-                IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK
-            )
+            () -> assertBlocked(prepareIndex("test").setId("1").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)
         );
 
         assertFalse(clusterAdmin().prepareHealth("test").setWaitForEvents(Priority.LANGUID).get().isTimedOut());
 
         // Cannot add further documents
-        assertBlocked(
-            client().prepareIndex().setIndex("test").setId("2").setSource("foo", "bar"),
-            IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK
-        );
+        assertBlocked(prepareIndex("test").setId("2").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
         assertSearchHits(prepareSearch("test"), "1");
 
         logger.info("--> index is confirmed read-only, releasing disk space");
@@ -231,11 +225,7 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception {
         // Attempt to create a new document until DiskUsageMonitor unblocks the index
         assertBusy(() -> {
             try {
-                client().prepareIndex("test")
-                    .setId("3")
-                    .setSource("foo", "bar")
-                    .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
-                    .get();
+                prepareIndex("test").setId("3").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
             } catch (ClusterBlockException e) {
                 throw new AssertionError("retrying", e);
             }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
index 7e3adf8e0283f..9818b0a89bc8e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
@@ -212,8 +212,7 @@ public void testClusterSettingsUpdateResponse() {
         ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings()
             .setTransientSettings(transientSettings1)
             .setPersistentSettings(persistentSettings1)
-            .execute()
-            .actionGet();
+            .get();
 
         assertAcked(response1);
         assertThat(response1.getTransientSettings().get(key1), notNullValue());
@@ -227,8 +226,7 @@ public void testClusterSettingsUpdateResponse() {
         ClusterUpdateSettingsResponse response2 = clusterAdmin().prepareUpdateSettings()
             .setTransientSettings(transientSettings2)
             .setPersistentSettings(persistentSettings2)
-            .execute()
-            .actionGet();
+            .get();
 
         assertAcked(response2);
         assertThat(response2.getTransientSettings().get(key1), notNullValue());
@@ -242,8 +240,7 @@ public void testClusterSettingsUpdateResponse() {
         ClusterUpdateSettingsResponse response3 = clusterAdmin().prepareUpdateSettings()
             .setTransientSettings(transientSettings3)
             .setPersistentSettings(persistentSettings3)
-            .execute()
-            .actionGet();
+            .get();
 
         assertAcked(response3);
         assertThat(response3.getTransientSettings().get(key1), nullValue());
@@ -502,7 +499,7 @@ public void testClusterUpdateSettingsWithBlocks() {
         }
 
         // It should work now
-        ClusterUpdateSettingsResponse response = request.execute().actionGet();
+        ClusterUpdateSettingsResponse response = request.get();
 
         assertAcked(response);
         assertThat(response.getTransientSettings().get(key1), notNullValue());
@@ -515,10 +512,7 @@ public void testMissingUnits() {
         assertAcked(prepareCreate("test"));
 
         try {
-            indicesAdmin().prepareUpdateSettings("test")
-                .setSettings(Settings.builder().put("index.refresh_interval", "10"))
-                .execute()
-                .actionGet();
+            indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "10")).get();
             fail("Expected IllegalArgumentException");
         } catch (IllegalArgumentException e) {
             assertThat(e.getMessage(), containsString("[index.refresh_interval] with value [10]"));
@@ -542,7 +536,7 @@ private void testLoggerLevelUpdate(final BiConsumer
-        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> throwBuilder.execute().actionGet());
+        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> throwBuilder.get());
         assertEquals("Unknown level constant [BOOM].", e.getMessage());
 
         try {
@@ -550,7 +544,7 @@ private void testLoggerLevelUpdate(final BiConsumer
         assertThat(
-            clusterAdmin().prepareHealth()
-                .setWaitForEvents(Priority.LANGUID)
-                .setWaitForNodes(">=2")
-                .setLocal(true)
-                .execute()
-                .actionGet()
-                .isTimedOut(),
+            clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").setLocal(true).get().isTimedOut(),
             equalTo(false)
         );
         dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size();
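Almost every hunk in this section performs the same mechanical rewrite: collapsing the .execute().actionGet() chain into .get(). On the client's request builders these are equivalent blocking calls (get() is documented shorthand for executing and then waiting on the result), which is why the change can be applied wholesale:

    // Both statements block on the same request; the second is just shorter.
    ClusterHealthResponse viaChain = clusterAdmin().prepareHealth().execute().actionGet();
    ClusterHealthResponse viaGet = clusterAdmin().prepareHealth().get();
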
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java
index 2561799b475ad..5ea78a6b1e3a0 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java
@@ -57,7 +57,7 @@ public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Excep
         final String node_2 = internalCluster().startDataOnlyNode();
         List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
         for (int i = 0; i < 100; i++) {
-            indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setSource("{\"int_field\":1}", XContentType.JSON));
+            indexRequestBuilderList.add(prepareIndex("test").setSource("{\"int_field\":1}", XContentType.JSON));
         }
         indexRandom(true, indexRequestBuilderList);
 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
index 586e95484afa4..4aabf0ac66a32 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
@@ -482,8 +482,7 @@ public void testRestartNodeWhileIndexing() throws Exception {
                 while (stopped.get() == false && docID.get() < 5000) {
                     String id = Integer.toString(docID.incrementAndGet());
                     try {
-                        DocWriteResponse response = client().prepareIndex(index)
-                            .setId(id)
+                        DocWriteResponse response = prepareIndex(index).setId(id)
                             .setSource(Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON)
                             .get();
                         assertThat(response.getResult(), is(oneOf(CREATED, UPDATED)));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java
index 8a83dcf808007..af254d42ec3ee 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java
@@ -240,9 +240,9 @@ public void testMappingTimeout() throws Exception {
         disruption.startDisrupting();
 
         BulkRequestBuilder bulk = client().prepareBulk();
-        bulk.add(client().prepareIndex("test").setId("2").setSource("{ \"f\": 1 }", XContentType.JSON));
-        bulk.add(client().prepareIndex("test").setId("3").setSource("{ \"g\": 1 }", XContentType.JSON));
-        bulk.add(client().prepareIndex("test").setId("4").setSource("{ \"f\": 1 }", XContentType.JSON));
+        bulk.add(prepareIndex("test").setId("2").setSource("{ \"f\": 1 }", XContentType.JSON));
+        bulk.add(prepareIndex("test").setId("3").setSource("{ \"g\": 1 }", XContentType.JSON));
+        bulk.add(prepareIndex("test").setId("4").setSource("{ \"f\": 1 }", XContentType.JSON));
         BulkResponse bulkResponse = bulk.get();
         assertTrue(bulkResponse.hasFailures());
 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java
index bf5970f5ea402..526921fdc95ba 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java
@@ -222,7 +222,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception {
         final String indexName = "index-one";
         createIndex(indexName);
-        client().prepareIndex(indexName).setSource("foo", "bar").get();
+        prepareIndex(indexName).setSource("foo", "bar").get();
 
         blockDataNode(repoName, dataNode);
 
@@ -272,7 +272,7 @@ private void createRandomIndex(String idxName) throws InterruptedException {
         final int numdocs = randomIntBetween(10, 100);
         IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
         for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex(idxName).setId(Integer.toString(i)).setSource("field1", "bar " + i);
+            builders[i] = prepareIndex(idxName).setId(Integer.toString(i)).setSource("field1", "bar " + i);
         }
         indexRandom(true, builders);
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
index ba086cb4e9788..407b1aae40600 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
@@ -74,22 +74,21 @@ public Path nodeConfigPath(int nodeOrdinal) {
                 return null;
             }
         };
-        try (
-            InternalTestCluster other = new InternalTestCluster(
-                randomLong(),
-                createTempDir(),
-                false,
-                false,
-                1,
-                1,
-                internalCluster().getClusterName(),
-                configurationSource,
-                0,
-                "other",
-                Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
-                Function.identity()
-            )
-        ) {
+        final InternalTestCluster other = new InternalTestCluster(
+            randomLong(),
+            createTempDir(),
+            false,
+            false,
+            1,
+            1,
+            internalCluster().getClusterName(),
+            configurationSource,
+            0,
+            "other",
+            Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
+            Function.identity()
+        );
+        try {
             other.beforeTest(random());
             final ClusterState first = internalCluster().getInstance(ClusterService.class).state();
             final ClusterState second = other.getInstance(ClusterService.class).state();
@@ -97,6 +96,8 @@ public Path nodeConfigPath(int nodeOrdinal) {
             assertThat(second.nodes().getSize(), equalTo(1));
             assertThat(first.nodes().getMasterNodeId(), not(equalTo(second.nodes().getMasterNodeId())));
             assertThat(first.metadata().clusterUUID(), not(equalTo(second.metadata().clusterUUID())));
+        } finally {
+            other.close();
         }
     }
 
@@ -140,27 +141,27 @@ public Path nodeConfigPath(int nodeOrdinal) {
                 return null;
             }
         };
-        try (
-            InternalTestCluster other = new InternalTestCluster(
-                randomLong(),
-                createTempDir(),
-                false,
-                false,
-                1,
-                1,
-                internalCluster().getClusterName(),
-                configurationSource,
-                0,
-                "other",
-                Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
-                Function.identity()
-            );
-            var ignored = mockAppender.capturing(JoinHelper.class)
-        ) {
+        final InternalTestCluster other = new InternalTestCluster(
+            randomLong(),
+            createTempDir(),
+            false,
+            false,
+            1,
+            1,
+            internalCluster().getClusterName(),
+            configurationSource,
+            0,
+            "other",
+            Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class),
+            Function.identity()
+        );
+        try (var ignored = mockAppender.capturing(JoinHelper.class)) {
             other.beforeTest(random());
             final ClusterState first = internalCluster().getInstance(ClusterService.class).state();
             assertThat(first.nodes().getSize(), equalTo(1));
             assertBusy(mockAppender::assertAllExpectationsMatched);
+        } finally {
+            other.close();
         }
     }
 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
index c475961336b51..a928b1a2eaecc 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/document/AliasedIndexDocumentActionsIT.java
@@ -17,7 +17,7 @@ public class AliasedIndexDocumentActionsIT extends DocumentActionsIT {
     protected void createIndex() {
         logger.info("Creating index [test1] with alias [test]");
         try {
-            indicesAdmin().prepareDelete("test1").execute().actionGet();
+            indicesAdmin().prepareDelete("test1").get();
         } catch (Exception e) {
             // ignore
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java
index 42bc0f19bf757..d3001f485846e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java
@@ -20,7 +20,6 @@
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.Strings;
@@ -35,6 +34,7 @@
 import static org.elasticsearch.action.DocWriteRequest.OpType;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.nullValue;
 
@@ -56,9 +56,7 @@ public void testIndexActions() throws Exception {
         logger.info("Running Cluster Health");
         ensureGreen();
         logger.info("Indexing [type1/1]");
-        DocWriteResponse indexResponse = client().prepareIndex()
-            .setIndex("test")
-            .setId("1")
+        DocWriteResponse indexResponse = prepareIndex("test").setId("1")
             .setSource(source("1", "test"))
             .setRefreshPolicy(RefreshPolicy.IMMEDIATE)
             .get();
@@ -89,7 +87,7 @@ public void testIndexActions() throws Exception {
 
         logger.info("Get [type1/1]");
         for (int i = 0; i < 5; i++) {
-            getResult = client().prepareGet("test", "1").execute().actionGet();
+            getResult = client().prepareGet("test", "1").get();
             assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
             assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test"))));
             assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test"));
@@ -100,10 +98,10 @@ public void testIndexActions() throws Exception {
 
         logger.info("Get [type1/1] with script");
         for (int i = 0; i < 5; i++) {
-            getResult = client().prepareGet("test", "1").setStoredFields("name").execute().actionGet();
+            getResult = client().prepareGet("test", "1").setStoredFields("name").get();
             assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
             assertThat(getResult.isExists(), equalTo(true));
-            assertThat(getResult.getSourceAsBytes(), nullValue());
+            assertThat(getResult.getSourceAsBytesRef(), nullValue());
             assertThat(getResult.getField("name").getValues().get(0).toString(), equalTo("test"));
         }
 
@@ -114,7 +112,7 @@ public void testIndexActions() throws Exception {
         }
 
         logger.info("Delete [type1/1]");
-        DeleteResponse deleteResponse = client().prepareDelete("test", "1").execute().actionGet();
+        DeleteResponse deleteResponse = client().prepareDelete("test", "1").get();
         assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName()));
         assertThat(deleteResponse.getId(), equalTo("1"));
         logger.info("Refreshing");
@@ -132,7 +130,7 @@ public void testIndexActions() throws Exception {
         client().index(new IndexRequest("test").id("2").source(source("2", "test2"))).actionGet();
 
         logger.info("Flushing");
-        FlushResponse flushResult = indicesAdmin().prepareFlush("test").execute().actionGet();
+        FlushResponse flushResult = indicesAdmin().prepareFlush("test").get();
         assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards));
         assertThat(flushResult.getFailedShards(), equalTo(0));
         logger.info("Refreshing");
@@ -154,22 +152,23 @@ public void testIndexActions() throws Exception {
         // check count
         for (int i = 0; i < 5; i++) {
             // test successful
-            SearchResponse countResponse = prepareSearch("test").setSize(0).setQuery(matchAllQuery()).execute().actionGet();
-            assertNoFailures(countResponse);
-            assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L));
-            assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
-            assertThat(countResponse.getFailedShards(), equalTo(0));
+            assertNoFailuresAndResponse(prepareSearch("test").setSize(0).setQuery(matchAllQuery()), countResponse -> {
+                assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L));
+                assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
+                assertThat(countResponse.getFailedShards(), equalTo(0));
+            });
 
             // count with no query is a match all one
-            countResponse = prepareSearch("test").setSize(0).execute().actionGet();
-            assertThat(
-                "Failures " + countResponse.getShardFailures(),
-                countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length,
-                equalTo(0)
-            );
-            assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L));
-            assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
-            assertThat(countResponse.getFailedShards(), equalTo(0));
+            assertNoFailuresAndResponse(prepareSearch("test").setSize(0), countResponse -> {
+                assertThat(
+                    "Failures " + countResponse.getShardFailures(),
+                    countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length,
+                    equalTo(0)
+                );
+                assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L));
+                assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
+                assertThat(countResponse.getFailedShards(), equalTo(0));
+            });
         }
     }
 
@@ -180,14 +179,13 @@ public void testBulk() throws Exception {
         ensureGreen();
 
         BulkResponse bulkResponse = client().prepareBulk()
-            .add(client().prepareIndex().setIndex("test").setId("1").setSource(source("1", "test")))
-            .add(client().prepareIndex().setIndex("test").setId("2").setSource(source("2", "test")).setCreate(true))
-            .add(client().prepareIndex().setIndex("test").setSource(source("3", "test")))
-            .add(client().prepareIndex().setIndex("test").setCreate(true).setSource(source("4", "test")))
+            .add(prepareIndex("test").setId("1").setSource(source("1", "test")))
+            .add(prepareIndex("test").setId("2").setSource(source("2", "test")).setCreate(true))
+            .add(prepareIndex("test").setSource(source("3", "test")))
+            .add(prepareIndex("test").setCreate(true).setSource(source("4", "test")))
             .add(client().prepareDelete().setIndex("test").setId("1"))
-            .add(client().prepareIndex().setIndex("test").setSource("{ xxx }", XContentType.JSON)) // failure
-            .execute()
-            .actionGet();
+            .add(prepareIndex("test").setSource("{ xxx }", XContentType.JSON)) // failure
+            .get();
 
         assertThat(bulkResponse.hasFailures(), equalTo(true));
         assertThat(bulkResponse.getItems().length, equalTo(6));
@@ -222,7 +220,7 @@ public void testBulk() throws Exception {
         assertThat(bulkResponse.getItems()[5].getIndex(), equalTo(getConcreteIndexName()));
 
         waitForRelocation(ClusterHealthStatus.GREEN);
-        RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().actionGet();
+        RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").get();
         assertNoFailures(refreshResponse);
         assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
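DocumentActionsIT moves its count checks from holding a raw SearchResponse to the assertNoFailuresAndResponse helper, which hands the response to a consumer instead. Presumably this lets the assertion utility own the response lifecycle (acquire, assert, release) so individual tests cannot forget to clean up. A condensed sketch of the pattern, with illustrative assertion values:

    // The helper executes the request, verifies there were no shard failures,
    // then runs the supplied assertions against the response it manages.
    assertNoFailuresAndResponse(prepareSearch("test").setSize(0).setQuery(matchAllQuery()), response -> {
        assertThat(response.getHits().getTotalHits().value, equalTo(2L));
        assertThat(response.getFailedShards(), equalTo(0));
    });
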
prepareIndex("idx").setSource("{}", XContentType.JSON).get(); assertShardInfo(indexResponse); DeleteResponse deleteResponse = client().prepareDelete("idx", indexResponse.getId()).get(); assertShardInfo(deleteResponse); @@ -49,7 +49,7 @@ public void testBulkWithIndexAndDeleteItems() throws Exception { prepareIndex(1); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); for (int i = 0; i < 10; i++) { - bulkRequestBuilder.add(client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + bulkRequestBuilder.add(prepareIndex("idx").setSource("{}", XContentType.JSON)); } BulkResponse bulkResponse = bulkRequestBuilder.get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java index 30940c1e154b0..8c6abc3e14cd8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -84,7 +84,7 @@ public Settings onNodeStopped(String nodeName) { internalCluster().startNode(dataPathSettings); logger.info("--> indexing a simple document"); - client().prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); + prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); logger.info("--> restarting the node without the data role"); ex = expectThrows( @@ -152,7 +152,7 @@ public void testFailsToStartIfUpgradedTooFar() { public void testUpgradeDataFolder() throws IOException, InterruptedException { String node = internalCluster().startNode(); prepareCreate("test").get(); - indexRandom(true, client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("test").setId("1").setSource("{}", XContentType.JSON)); String nodeId = clusterAdmin().prepareState().get().getState().nodes().getMasterNodeId(); final Settings dataPathSettings = internalCluster().dataPathSettings(node); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java index 06bb86b8e072d..2a5295caf31b2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java @@ -35,7 +35,7 @@ public void testRepurpose() throws Exception { prepareCreate(indexName, indexSettings(1, 0)).get(); logger.info("--> indexing a simple document"); - client().prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); + prepareIndex(indexName).setId("1").setSource("field1", "value1").get(); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/explain/ExplainActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/explain/ExplainActionIT.java index fe62452fece47..cdb418182fff2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/explain/ExplainActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/explain/ExplainActionIT.java @@ -42,7 +42,7 @@ public void testSimple() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSettings(Settings.builder().put("index.refresh_interval", -1))); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("field", "value1").get(); + prepareIndex("test").setId("1").setSource("field", "value1").get(); ExplainResponse response = 
client().prepareExplain(indexOrAlias(), "1").setQuery(QueryBuilders.matchAllQuery()).get(); assertNotNull(response); @@ -99,8 +99,7 @@ public void testExplainWithFields() throws Exception { ); ensureGreen("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().startObject("obj1").field("field1", "value1").field("field2", "value2").endObject().endObject() ) @@ -158,8 +157,7 @@ public void testExplainWithSource() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); ensureGreen("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().startObject("obj1").field("field1", "value1").field("field2", "value2").endObject().endObject() ) @@ -177,8 +175,8 @@ public void testExplainWithSource() throws Exception { assertThat(response.getExplanation().getValue(), equalTo(1.0f)); assertThat(response.getGetResult().isExists(), equalTo(true)); assertThat(response.getGetResult().getId(), equalTo("1")); - assertThat(response.getGetResult().getSource().size(), equalTo(1)); - assertThat(((Map) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1")); + assertThat(response.getGetResult().sourceAsMap().size(), equalTo(1)); + assertThat(((Map) response.getGetResult().sourceAsMap().get("obj1")).get("field1").toString(), equalTo("value1")); response = client().prepareExplain(indexOrAlias(), "1") .setQuery(QueryBuilders.matchAllQuery()) @@ -186,7 +184,7 @@ public void testExplainWithSource() throws Exception { .get(); assertNotNull(response); assertTrue(response.isMatch()); - assertThat(((Map) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1")); + assertThat(((Map) response.getGetResult().sourceAsMap().get("obj1")).get("field1").toString(), equalTo("value1")); } public void testExplainWithFilteredAlias() { @@ -196,7 +194,7 @@ public void testExplainWithFilteredAlias() { ); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").get(); refresh(); ExplainResponse response = client().prepareExplain("alias1", "1").setQuery(QueryBuilders.matchAllQuery()).get(); @@ -213,7 +211,7 @@ public void testExplainWithFilteredAliasFetchSource() { ); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").get(); refresh(); ExplainResponse response = client().prepareExplain("alias1", "1") @@ -229,8 +227,8 @@ public void testExplainWithFilteredAliasFetchSource() { assertThat(response.getGetResult(), notNullValue()); assertThat(response.getGetResult().getIndex(), equalTo("test")); assertThat(response.getGetResult().getId(), equalTo("1")); - assertThat(response.getGetResult().getSource(), notNullValue()); - assertThat(response.getGetResult().getSource().get("field1"), equalTo("value1")); + assertThat(response.getGetResult().sourceAsMap(), notNullValue()); + assertThat(response.getGetResult().sourceAsMap().get("field1"), equalTo("value1")); } public void testExplainDateRangeInQueryString() { @@ -240,7 +238,7 @@ public void testExplainDateRangeInQueryString() { String aMonthAgo = DateTimeFormatter.ISO_LOCAL_DATE.format(now.minusMonths(1)); String aMonthFromNow = 
DateTimeFormatter.ISO_LOCAL_DATE.format(now.plusMonths(1)); - client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); + prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); @@ -290,12 +288,12 @@ public void testQueryRewrite() { .get(); ensureGreen("twitter"); - client().prepareIndex("twitter").setId("1").setSource("user", "user1", "followers", new String[] { "user2", "user3" }).get(); - client().prepareIndex("twitter").setId("2").setSource("user", "user2", "followers", new String[] { "user1" }).get(); + prepareIndex("twitter").setId("1").setSource("user", "user1", "followers", new String[] { "user2", "user3" }).get(); + prepareIndex("twitter").setId("2").setSource("user", "user2", "followers", new String[] { "user1" }).get(); refresh(); TermsQueryBuilder termsLookupQuery = QueryBuilders.termsLookupQuery("user", new TermsLookup("twitter", "2", "followers")); - ExplainResponse response = client().prepareExplain("twitter", "1").setQuery(termsLookupQuery).execute().actionGet(); + ExplainResponse response = client().prepareExplain("twitter", "1").setQuery(termsLookupQuery).get(); Explanation explanation = response.getExplanation(); assertNotNull(explanation); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java index fe447eca6e8fd..24bf198b7b42f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java @@ -18,7 +18,7 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasKey; public class ClusterFeaturesIT extends ESIntegTestCase { @@ -29,7 +29,7 @@ public void testClusterHasFeatures() { FeatureService service = internalCluster().getCurrentMasterNodeInstance(FeatureService.class); - assertThat(service.getNodeFeatures(), hasItem(FeatureService.FEATURES_SUPPORTED.id())); + assertThat(service.getNodeFeatures(), hasKey(FeatureService.FEATURES_SUPPORTED.id())); // check the nodes all have a feature in their cluster state (there should always be features_supported) var response = clusterAdmin().state(new ClusterStateRequest().clear().nodes(true)).actionGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index d92664f55416a..600219da3d90f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -89,11 +89,10 @@ public void testMappingMetadataParsed() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> verify meta _routing required exists"); - MappingMetadata mappingMd = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test").mapping(); + MappingMetadata mappingMd = clusterAdmin().prepareState().get().getState().metadata().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); logger.info("--> restarting nodes..."); @@ -103,7 +102,7 @@ public void testMappingMetadataParsed() throws Exception { ensureYellow(); logger.info("--> verify meta _routing 
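ExplainActionIT (and DocumentActionsIT earlier) also migrates the source accessors: getSource() becomes sourceAsMap() on GetResult, and getSourceAsBytes() becomes getSourceAsBytesRef(). A small sketch of reading a fetched document through the newer accessors; the index, id, and variable names are illustrative:

    // Parsed view of _source versus the raw bytes; getSourceAsBytesRef() hands
    // back a BytesReference without first copying into a fresh byte[].
    GetResponse doc = client().prepareGet("test", "1").get();
    Map<String, Object> parsed = doc.getSourceAsMap();
    BytesReference raw = doc.getSourceAsBytesRef();
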
required exists"); - mappingMd = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test").mapping(); + mappingMd = clusterAdmin().prepareState().get().getState().metadata().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); } @@ -119,7 +118,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for green status"); ensureGreen(); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().execute().actionGet(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -128,12 +127,12 @@ public void testSimpleOpenClose() throws Exception { ); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -142,14 +141,14 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test").setId("1").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); fail(); } catch (IndexClosedException e) { // all is well } logger.info("--> creating another index (test2) by indexing into it"); - client().prepareIndex("test2").setId("1").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test2").setId("1").setSource("field1", "value1").get(); logger.info("--> verifying that the state is green"); ensureGreen(); @@ -159,7 +158,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> verifying that the state is green"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -168,12 +167,12 @@ public void testSimpleOpenClose() throws Exception { ); logger.info("--> trying to get the indexed document on the first index"); - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -182,25 +181,25 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for two nodes and 
green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test").setId("1").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); fail(); } catch (IndexClosedException e) { // all is well } logger.info("--> opening index..."); - indicesAdmin().prepareOpen("test").execute().actionGet(); + indicesAdmin().prepareOpen("test").get(); logger.info("--> waiting for green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().execute().actionGet(); + stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -209,11 +208,11 @@ public void testSimpleOpenClose() throws Exception { ); logger.info("--> trying to get the indexed document on the first round (before close and shutdown)"); - getResponse = client().prepareGet("test", "1").execute().actionGet(); + getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("2").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test").setId("2").setSource("field1", "value1").get(); } public void testJustMasterNode() throws Exception { @@ -223,7 +222,7 @@ public void testJustMasterNode() throws Exception { internalCluster().startNode(nonDataNode()); logger.info("--> create an index"); - indicesAdmin().prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).execute().actionGet(); + indicesAdmin().prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); logger.info("--> restarting master node"); internalCluster().fullRestart(new RestartCallback() { @@ -234,15 +233,11 @@ public Settings onNodeStopped(String nodeName) { }); logger.info("--> waiting for test index to be created"); - ClusterHealthResponse health = clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setIndices("test") - .execute() - .actionGet(); + ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify we have an index"); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setIndices("test").execute().actionGet(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setIndices("test").get(); assertThat(clusterStateResponse.getState().metadata().hasIndex("test"), equalTo(true)); } @@ -254,9 +249,9 @@ public void testJustMasterNodeAndJustDataNode() { internalCluster().startDataOnlyNode(); logger.info("--> create an index"); - indicesAdmin().prepareCreate("test").execute().actionGet(); + indicesAdmin().prepareCreate("test").get(); - client().prepareIndex("test").setSource("field1", "value1").execute().actionGet(); + prepareIndex("test").setSource("field1", "value1").get(); } public void testTwoNodesSingleDoc() throws Exception { @@ -266,15 
+261,14 @@ public void testTwoNodesSingleDoc() throws Exception { internalCluster().startNodes(2); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); ClusterHealthResponse health = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify 1 doc in the index"); @@ -285,20 +279,15 @@ public void testTwoNodesSingleDoc() throws Exception { logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().execute().actionGet(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); logger.info("--> opening the index..."); - indicesAdmin().prepareOpen("test").execute().actionGet(); + indicesAdmin().prepareOpen("test").get(); logger.info("--> waiting for green status"); - health = clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setWaitForGreenStatus() - .setWaitForNodes("2") - .execute() - .actionGet(); + health = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify 1 doc in the index"); @@ -339,7 +328,7 @@ public Settings onNodeStopped(final String nodeName) throws Exception { final String otherNode = nodes.get(0); logger.info("--> delete index and verify it is deleted"); final Client client = client(otherNode); - client.admin().indices().prepareDelete(indexName).execute().actionGet(); + client.admin().indices().prepareDelete(indexName).get(); assertFalse(indexExists(indexName, client)); logger.info("--> index deleted"); return super.onNodeStopped(nodeName); @@ -376,7 +365,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); @@ -453,7 +442,7 @@ public void testRecoverMissingAnalyzer() throws Exception { } }""").get(); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value one").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value one").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); @@ -501,7 +490,7 @@ public void testRecoverMissingAnalyzer() throws Exception { public void testArchiveBrokenClusterSettings() throws Exception { logger.info("--> starting one node"); internalCluster().startNode(); - client().prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); + 
prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); if (usually()) { ensureYellow(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java index a77201e1e141a..15a72e3534b50 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -42,11 +42,11 @@ public void testQuorumRecovery() throws Exception { final NumShards test = getNumShards("test"); logger.info("--> indexing..."); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); // We don't check for failures in the flush response: if we do we might get the following: // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed] flush(); - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); refresh(); for (int i = 0; i < 10; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index ec87a0d1fa2fa..b55dd5e207c41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -37,8 +37,7 @@ public Set waitForNoBlocksOnNode(TimeValue timeout, Client nodeCli .cluster() .prepareState() .setLocal(true) - .execute() - .actionGet() + .get() .getState() .blocks() .global(ClusterBlockLevel.METADATA_WRITE); @@ -56,81 +55,33 @@ public void testRecoverAfterDataNodes() { logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(masterOnlyNode())); assertThat( - master1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + master1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); logger.info("--> start data_node (1)"); Client data1 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(dataOnlyNode())); assertThat( - master1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + master1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - data1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + data1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) 
); logger.info("--> start master_node (2)"); Client master2 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(masterOnlyNode())); assertThat( - master2.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + master2.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - data1.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + data1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - master2.admin() - .cluster() - .prepareState() - .setLocal(true) - .execute() - .actionGet() - .getState() - .blocks() - .global(ClusterBlockLevel.METADATA_WRITE), + master2.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 81149efb1596f..f05a83e861e52 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -96,31 +96,21 @@ public void testOneNodeRecoverFromGateway() throws Exception { ); assertAcked(prepareCreate("test").setMapping(mapping)); - client().prepareIndex("test") - .setId("10990239") + prepareIndex("test").setId("10990239") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).value(179).endArray().endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("10990473") + .get(); + prepareIndex("test").setId("10990473") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).endArray().endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("10990513") + .get(); + prepareIndex("test").setId("10990513") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).value(179).endArray().endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("10990695") + .get(); + prepareIndex("test").setId("10990695") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).endArray().endObject()) - .execute() - .actionGet(); - client().prepareIndex("test") - .setId("11026351") + .get(); + prepareIndex("test").setId("11026351") .setSource(jsonBuilder().startObject().startArray("appAccountIds").value(14).endArray().endObject()) - .execute() - .actionGet(); + .get(); refresh(); assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)), 2); @@ -134,7 +124,7 @@ public void testOneNodeRecoverFromGateway() throws Exception { ensureYellow(); primaryTerms = assertAndCapturePrimaryTerms(primaryTerms); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)), 2); internalCluster().fullRestart(); @@ -143,7 +133,7 @@ public void testOneNodeRecoverFromGateway() throws Exception { 
ensureYellow(); primaryTerms = assertAndCapturePrimaryTerms(primaryTerms); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)), 2); } @@ -275,17 +265,9 @@ public void testSingleNodeNoFlush() throws Exception { public void testSingleNodeWithFlush() throws Exception { internalCluster().startNode(); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("field", "value1").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); flush(); - client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().field("field", "value2").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); refresh(); assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 2); @@ -320,17 +302,9 @@ public void testTwoNodeFirstNodeCleared() throws Exception { final String firstNode = internalCluster().startNode(); internalCluster().startNode(); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("field", "value1").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); flush(); - client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().field("field", "value2").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); refresh(); logger.info("Running Cluster Health (wait for the shards to startup)"); @@ -378,18 +352,10 @@ public void testLatestVersionLoaded() throws Exception { Settings node2DataPathSettings = internalCluster().dataPathSettings(nodes.get(1)); assertAcked(indicesAdmin().prepareCreate("test")); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("field", "value1").endObject()) - .execute() - .actionGet(); - indicesAdmin().prepareFlush().execute().actionGet(); - client().prepareIndex("test") - .setId("2") - .setSource(jsonBuilder().startObject().field("field", "value2").endObject()) - .execute() - .actionGet(); - indicesAdmin().prepareRefresh().execute().actionGet(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + indicesAdmin().prepareFlush().get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); + indicesAdmin().prepareRefresh().get(); logger.info("--> running cluster_health (wait for the shards to startup)"); ensureGreen(); @@ -405,16 +371,12 @@ public void testLatestVersionLoaded() throws Exception { internalCluster().stopRandomDataNode(); logger.info("--> one node is closed - start indexing data into the second one"); - client().prepareIndex("test") - .setId("3") - .setSource(jsonBuilder().startObject().field("field", "value3").endObject()) - .execute() - .actionGet(); + prepareIndex("test").setId("3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).get(); // TODO: remove once refresh doesn't fail immediately if there a master block: // https://github.com/elastic/elasticsearch/issues/9997 // 
clusterAdmin().prepareHealth("test").setWaitForYellowStatus().get(); logger.info("--> refreshing all indices after indexing is complete"); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); logger.info("--> checking if documents exist, there should be 3"); for (int i = 0; i < 10; i++) { @@ -442,9 +404,8 @@ public void testLatestVersionLoaded() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); - indicesAdmin().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute().actionGet(); + .get(); + indicesAdmin().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).get(); logger.info("--> stopping the second node"); internalCluster().stopRandomDataNode(); @@ -465,7 +426,7 @@ public void testLatestVersionLoaded() throws Exception { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 3); } - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); assertThat(state.metadata().templates().get("template_1").patterns(), equalTo(Collections.singletonList("te*"))); assertThat(state.metadata().index("test").getAliases().get("test_alias"), notNullValue()); assertThat(state.metadata().index("test").getAliases().get("test_alias").filter(), notNullValue()); @@ -495,7 +456,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { logger.info("--> indexing docs"); int numDocs = randomIntBetween(1, 1024); for (int i = 0; i < numDocs; i++) { - client(primaryNode).prepareIndex("test").setSource("field", "value").execute().actionGet(); + client(primaryNode).prepareIndex("test").setSource("field", "value").get(); } client(primaryNode).admin().indices().prepareFlush("test").setForce(true).get(); @@ -528,7 +489,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { public Settings onNodeStopped(String nodeName) throws Exception { // index some more documents; we expect to reuse the files that already exist on the replica for (int i = 0; i < moreDocs; i++) { - client(primaryNode).prepareIndex("test").setSource("field", "value").execute().actionGet(); + client(primaryNode).prepareIndex("test").setSource("field", "value").get(); } // prevent a sequence-number-based recovery from being possible diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index 8cbce0cc098ed..e7988d447571a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -89,7 +89,7 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(100, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(100, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); if (randomBoolean()) { @@ -97,7 +97,7 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(0, 80)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(0, 
80)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); } ensureActivePeerRecoveryRetentionLeasesAdvanced(indexName); @@ -152,7 +152,7 @@ public void testRecentPrimaryInformation() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(10, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(10, 100)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); internalCluster().stopNode(nodeWithReplica); if (randomBoolean()) { @@ -160,7 +160,7 @@ public void testRecentPrimaryInformation() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(10, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(10, 100)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); } CountDownLatch blockRecovery = new CountDownLatch(1); @@ -184,7 +184,7 @@ public void testRecentPrimaryInformation() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(50, 200)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(50, 200)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); assertBusy(() -> { @@ -235,14 +235,14 @@ public void testFullClusterRestartPerformNoopRecovery() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); indexRandom( randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(0, 80)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(0, 80)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); if (randomBoolean()) { indicesAdmin().prepareForceMerge(indexName).get(); @@ -281,7 +281,7 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); String nodeWithLowerMatching = randomFrom(internalCluster().nodesInclude(indexName)); @@ -293,7 +293,7 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(1, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(1, 100)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); ensureActivePeerRecoveryRetentionLeasesAdvanced(indexName); String nodeWithHigherMatching = randomFrom(internalCluster().nodesInclude(indexName)); @@ -304,7 +304,7 @@ public void testPreferCopyWithHighestMatchingOperations() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(1, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(1, 100)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); } @@ -338,7 +338,7 
+338,7 @@ public void testDoNotCancelRecoveryForBrokenNode() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); indicesAdmin().prepareFlush(indexName).get(); String brokenNode = internalCluster().startDataOnlyNode(); @@ -384,7 +384,7 @@ public void testPeerRecoveryForClosedIndices() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(1, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).toList() + IntStream.range(0, randomIntBetween(1, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).toList() ); ensureActivePeerRecoveryRetentionLeasesAdvanced(indexName); assertAcked(indicesAdmin().prepareClose(indexName)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java index 313d1e686e1fd..fffa0ad05496b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -59,9 +60,9 @@ /** * A legacy version of {@link ReplicaShardAllocatorIT#testPreferCopyCanPerformNoopRecovery()} verifying * that the {@link ReplicaShardAllocator} prefers copies with matching sync_id. 
- * TODO: Remove this test in 9.0 */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +@UpdateForV9 // remove this test in v9 public class ReplicaShardAllocatorSyncIdIT extends ESIntegTestCase { private static final AtomicBoolean allowFlush = new AtomicBoolean(); @@ -170,7 +171,7 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(100, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(100, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); if (randomBoolean()) { indicesAdmin().prepareFlush(indexName).get(); @@ -227,7 +228,7 @@ public void testFullClusterRestartPerformNoopRecovery() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); if (randomBoolean()) { indicesAdmin().prepareFlush(indexName).get(); @@ -265,7 +266,7 @@ public void testSimulateRecoverySourceOnOldNode() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(200, 500)).mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).toList() + IntStream.range(0, between(200, 500)).mapToObj(n -> prepareIndex(indexName).setSource("f", "v")).toList() ); } if (randomBoolean()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index 129b83f664927..3a12856fb92b5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -81,7 +81,7 @@ public void testSimpleGet() { assertThat(response.isExists(), equalTo(false)); logger.info("--> index doc 1"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); logger.info("--> non realtime get 1"); response = client().prepareGet(indexOrAlias(), "1").setRealtime(false).get(); @@ -100,7 +100,7 @@ public void testSimpleGet() { assertThat(response.getIndex(), equalTo("test")); Set<String> fields = new HashSet<>(response.getFields().keySet()); assertThat(fields, equalTo(Collections.emptySet())); - assertThat(response.getSourceAsBytes(), nullValue()); + assertThat(response.getSourceAsBytesRef(), nullValue()); logger.info("--> realtime get 1 (no source, explicit)"); response = client().prepareGet(indexOrAlias(), "1").setFetchSource(false).get(); @@ -108,7 +108,7 @@ public void testSimpleGet() { assertThat(response.getIndex(), equalTo("test")); fields = new HashSet<>(response.getFields().keySet()); assertThat(fields, equalTo(Collections.emptySet())); - assertThat(response.getSourceAsBytes(), nullValue()); + assertThat(response.getSourceAsBytesRef(), nullValue()); logger.info("--> realtime get 1 (no type)"); response = client().prepareGet(indexOrAlias(), "1").get(); @@ -121,7 +121,7 @@ public void testSimpleGet() { response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getSourceAsBytes(), 
nullValue()); + assertThat(response.getSourceAsBytesRef(), nullValue()); assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1")); assertThat(response.getField("field2"), nullValue()); @@ -155,7 +155,7 @@ public void testSimpleGet() { response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getSourceAsBytes(), nullValue()); + assertThat(response.getSourceAsBytesRef(), nullValue()); assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1")); assertThat(response.getField("field2"), nullValue()); @@ -163,12 +163,12 @@ public void testSimpleGet() { response = client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").setFetchSource(true).get(); assertThat(response.isExists(), equalTo(true)); assertThat(response.getIndex(), equalTo("test")); - assertThat(response.getSourceAsBytes(), not(nullValue())); + assertThat(response.getSourceAsBytesRef(), not(nullValue())); assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1")); assertThat(response.getField("field2"), nullValue()); logger.info("--> update doc 1"); - client().prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").get(); logger.info("--> realtime get 1"); response = client().prepareGet(indexOrAlias(), "1").get(); @@ -178,7 +178,7 @@ public void testSimpleGet() { assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2_1")); logger.info("--> update doc 1 again"); - client().prepareIndex("test").setId("1").setSource("field1", "value1_2", "field2", "value2_2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1_2", "field2", "value2_2").get(); response = client().prepareGet(indexOrAlias(), "1").get(); assertThat(response.isExists(), equalTo(true)); @@ -202,10 +202,7 @@ public void testGetWithAliasPointingToMultipleIndices() { } else { indicesAdmin().prepareCreate("index3").addAlias(new Alias("alias1").indexRouting("1").writeIndex(true)).get(); } - DocWriteResponse indexResponse = client().prepareIndex("index1") - .setId("id") - .setSource(Collections.singletonMap("foo", "bar")) - .get(); + DocWriteResponse indexResponse = prepareIndex("index1").setId("id").setSource(Collections.singletonMap("foo", "bar")).get(); assertThat(indexResponse.status().getStatus(), equalTo(RestStatus.CREATED.getStatus())); IllegalArgumentException exception = expectThrows( @@ -232,7 +229,7 @@ public void testSimpleMultiGet() throws Exception { assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false)); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } response = client().prepareMultiGet() @@ -271,7 +268,7 @@ public void testSimpleMultiGet() throws Exception { .get(); assertThat(response.getResponses().length, equalTo(2)); - assertThat(response.getResponses()[0].getResponse().getSourceAsBytes(), nullValue()); + assertThat(response.getResponses()[0].getResponse().getSourceAsBytesRef(), nullValue()); assertThat(response.getResponses()[0].getResponse().getField("field").getValues().get(0).toString(), equalTo("value1")); } @@ -294,7 +291,7 @@ public void 
testGetDocWithMultivaluedFields() throws Exception { assertThat(response.isExists(), equalTo(false)); assertThat(response.isExists(), equalTo(false)); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); response = client().prepareGet("test", "1").setStoredFields("field").get(); assertThat(response.isExists(), equalTo(true)); @@ -325,7 +322,7 @@ public void testGetWithVersion() { assertThat(response.isExists(), equalTo(false)); logger.info("--> index doc 1"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); // From translog: @@ -369,7 +366,7 @@ public void testGetWithVersion() { } logger.info("--> index doc 1 again, so increasing the version"); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); // From translog: @@ -424,7 +421,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false)); for (int i = 0; i < 3; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } // Version from translog @@ -474,7 +471,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); for (int i = 0; i < 3; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } // Version from translog @@ -547,8 +544,7 @@ public void testGetFieldsNonLeafField() throws Exception { .setSettings(Settings.builder().put("index.refresh_interval", -1)) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject()) .get(); @@ -625,7 +621,7 @@ public void testGetFieldsComplexField() throws Exception { logger.info("indexing documents"); - client().prepareIndex("my-index").setId("1").setSource(source, XContentType.JSON).get(); + prepareIndex("my-index").setId("1").setSource(source, XContentType.JSON).get(); logger.info("checking real time retrieval"); @@ -720,7 +716,7 @@ public void testUngeneratedFieldsThatAreAlwaysStored() throws IOException { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); ensureGreen(); - client().prepareIndex("test").setId("1").setRouting("routingValue").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setRouting("routingValue").setId("1").setSource("{}", XContentType.JSON).get(); String[] fieldsList = { "_routing" }; // before refresh - document is only in translog @@ -749,7 +745,7 @@ public void testUngeneratedFieldsNotPartOfSourceStored() throws IOException { "text": "some text." 
} """; - client().prepareIndex("test").setId("1").setSource(doc, XContentType.JSON).setRouting("1").get(); + prepareIndex("test").setId("1").setSource(doc, XContentType.JSON).setRouting("1").get(); String[] fieldsList = { "_routing" }; // before refresh - document is only in translog assertGetFieldsAlwaysWorks(indexOrAlias(), "1", fieldsList, "1"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java index 33038a0fb32a0..c9432ede04911 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java @@ -44,11 +44,7 @@ public void testGetFromTranslog() throws Exception { // There hasn't been any switches from unsafe to safe map assertThat(response.segmentGeneration(), equalTo(-1L)); - var indexResponse = client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1") - .setRefreshPolicy(RefreshPolicy.NONE) - .get(); + var indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(RefreshPolicy.NONE).get(); response = getFromTranslog(indexOrAlias(), "1"); assertNotNull(response.getResult()); assertThat(response.getResult().isExists(), equalTo(true)); @@ -61,7 +57,7 @@ public void testGetFromTranslog() throws Exception { assertThat(response.getResult().isExists(), equalTo(false)); assertThat(response.segmentGeneration(), equalTo(-1L)); - indexResponse = client().prepareIndex("test").setSource("field1", "value2").get(); + indexResponse = prepareIndex("test").setSource("field1", "value2").get(); response = getFromTranslog(indexOrAlias(), indexResponse.getId()); assertNotNull(response.getResult()); assertThat(response.getResult().isExists(), equalTo(true)); @@ -74,11 +70,11 @@ public void testGetFromTranslog() throws Exception { assertThat(response.segmentGeneration(), equalTo(-1L)); // After two refreshes the LiveVersionMap switches back to append-only and stops tracking IDs // Refreshing with empty LiveVersionMap doesn't cause the switch, see {@link LiveVersionMap.Maps#shouldInheritSafeAccess()}. - client().prepareIndex("test").setSource("field1", "value3").get(); + prepareIndex("test").setSource("field1", "value3").get(); refresh("test"); refresh("test"); // An optimized index operation marks the maps as unsafe - client().prepareIndex("test").setSource("field1", "value4").get(); + prepareIndex("test").setSource("field1", "value4").get(); response = getFromTranslog(indexOrAlias(), "non-existent"); assertNull(response.getResult()); assertThat(response.segmentGeneration(), greaterThan(0L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java index fd14b18bb8a3b..2d8a48cf48668 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java @@ -48,7 +48,7 @@ public void testShardMultiGetFromTranslog() throws Exception { // Do a single get to enable storing locations in translog. Otherwise, we could get unwanted refreshes that // prune the LiveVersionMap and would make the test fail/flaky. 
- var indexResponse = client().prepareIndex("test").setId("0").setSource("field1", "value2").get(); + var indexResponse = prepareIndex("test").setId("0").setSource("field1", "value2").get(); client().prepareGet("test", indexResponse.getId()).get(); var mgetIds = List.of("1", "2", "3"); @@ -107,7 +107,7 @@ public void testShardMultiGetFromTranslog() throws Exception { } assertThat(response.segmentGeneration(), equalTo(-1L)); - indexResponse = client().prepareIndex("test").setSource("field1", "value2").get(); + indexResponse = prepareIndex("test").setSource("field1", "value2").get(); response = getFromTranslog(indexOrAlias(), List.of(indexResponse.getId())); multiGetShardResponse = response.multiGetShardResponse(); assertThat(getLocations(multiGetShardResponse).size(), equalTo(1)); @@ -131,11 +131,11 @@ public void testShardMultiGetFromTranslog() throws Exception { assertThat(response.segmentGeneration(), equalTo(-1L)); // After two refreshes the LiveVersionMap switches back to append-only and stops tracking IDs // Refreshing with empty LiveVersionMap doesn't cause the switch, see {@link LiveVersionMap.Maps#shouldInheritSafeAccess()}. - client().prepareIndex("test").setSource("field1", "value3").get(); + prepareIndex("test").setSource("field1", "value3").get(); refresh("test"); refresh("test"); // An optimized index operation marks the maps as unsafe - client().prepareIndex("test").setSource("field1", "value4").get(); + prepareIndex("test").setSource("field1", "value4").get(); response = getFromTranslog(indexOrAlias(), List.of("non-existent")); multiGetShardResponse = response.multiGetShardResponse(); assertThat(getLocations(multiGetShardResponse).size(), equalTo(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java index 7a9fd0b6ccf60..eda8a4eb9e459 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthCancellationIT.java @@ -90,7 +90,7 @@ public void testCancellation() throws Exception { } final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); - final PlainActionFuture findHealthNodeFuture = PlainActionFuture.newFuture(); + final PlainActionFuture findHealthNodeFuture = new PlainActionFuture<>(); // the health node might take a bit of time to be assigned by the persistent task framework so we wait until we have a health // node in the cluster before proceeding with the test // proceeding with the execution before the health node assignment would yield a non-deterministic behaviour as we diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java index 0717d4b306ed2..660d6028486a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java @@ -45,210 +45,207 @@ public void setup() { } public void testEachMasterPublishesTheirThresholds() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - int numberOfNodes = 3; - Map watermarkByNode = new HashMap<>(); - Map maxHeadroomByNode = new HashMap<>(); - Map shardLimitsPerNode = new HashMap<>(); - for (int i = 0; i < 
numberOfNodes; i++) { - ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); - String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); - ByteSizeValue customMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - var customShardLimits = new HealthMetadata.ShardLimits(randomIntBetween(1, 1000), randomIntBetween(1001, 2000)); - String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString(), customShardLimits); - watermarkByNode.put(nodeName, customWatermark); - maxHeadroomByNode.put(nodeName, customMaxHeadroom); - shardLimitsPerNode.put(nodeName, customShardLimits); - } - ensureStableCluster(numberOfNodes); + final InternalTestCluster internalCluster = internalCluster(); + int numberOfNodes = 3; + Map<String, String> watermarkByNode = new HashMap<>(); + Map<String, ByteSizeValue> maxHeadroomByNode = new HashMap<>(); + Map<String, HealthMetadata.ShardLimits> shardLimitsPerNode = new HashMap<>(); + for (int i = 0; i < numberOfNodes; i++) { + ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); + String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); + ByteSizeValue customMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; + var customShardLimits = new HealthMetadata.ShardLimits(randomIntBetween(1, 1000), randomIntBetween(1001, 2000)); + String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString(), customShardLimits); + watermarkByNode.put(nodeName, customWatermark); + maxHeadroomByNode.put(nodeName, customMaxHeadroom); + shardLimitsPerNode.put(nodeName, customShardLimits); + } + ensureStableCluster(numberOfNodes); - String electedMaster = internalCluster.getMasterName(); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); + String electedMaster = internalCluster.getMasterName(); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); - } + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); + } - // Stop the master to ensure another node will become master with a different watermark - internalCluster.stopNode(electedMaster); - ensureStableCluster(numberOfNodes - 1); - electedMaster = internalCluster.getMasterName(); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); - } + // Stop the master to 
ensure another node will become master with a different watermark + internalCluster.stopNode(electedMaster); + ensureStableCluster(numberOfNodes - 1); + electedMaster = internalCluster.getMasterName(); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); + + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); + } - // restart the whole cluster - internalCluster.fullRestart(); - ensureStableCluster(internalCluster.numDataAndMasterNodes()); - String electedMasterAfterRestart = internalCluster.getMasterName(); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMasterAfterRestart))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMasterAfterRestart))); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMasterAfterRestart)); - } + // restart the whole cluster + internalCluster.fullRestart(); + ensureStableCluster(internalCluster.numDataAndMasterNodes()); + String electedMasterAfterRestart = internalCluster.getMasterName(); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMasterAfterRestart))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMasterAfterRestart))); + + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMasterAfterRestart)); } } public void testWatermarkSettingUpdate() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - int numberOfNodes = 3; - ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); - String initialWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); - ByteSizeValue initialMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - HealthMetadata.ShardLimits initialShardLimits = new HealthMetadata.ShardLimits( - randomIntBetween(1, 1000), - randomIntBetween(1001, 2000) - ); - for (int i = 0; i < numberOfNodes; i++) { - startNode(internalCluster, initialWatermark, initialMaxHeadroom.toString(), initialShardLimits); - } + final InternalTestCluster internalCluster = internalCluster(); + int numberOfNodes = 3; + ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); + String initialWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); + ByteSizeValue initialMaxHeadroom = percentageMode ? 
randomBytes : ByteSizeValue.MINUS_ONE; + HealthMetadata.ShardLimits initialShardLimits = new HealthMetadata.ShardLimits( + randomIntBetween(1, 1000), + randomIntBetween(1001, 2000) + ); + for (int i = 0; i < numberOfNodes; i++) { + startNode(internalCluster, initialWatermark, initialMaxHeadroom.toString(), initialShardLimits); + } - randomBytes = ByteSizeValue.ofBytes(randomLongBetween(101, 200)); - String updatedLowWatermark = percentageMode ? randomIntBetween(40, 59) + "%" : randomBytes.toString(); - ByteSizeValue updatedLowMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - randomBytes = ByteSizeValue.ofBytes(randomLongBetween(50, 100)); - String updatedHighWatermark = percentageMode ? randomIntBetween(60, 90) + "%" : randomBytes.toString(); - ByteSizeValue updatedHighMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - randomBytes = ByteSizeValue.ofBytes(randomLongBetween(5, 10)); - String updatedFloodStageWatermark = percentageMode ? randomIntBetween(91, 95) + "%" : randomBytes.toString(); - ByteSizeValue updatedFloodStageMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - HealthMetadata.ShardLimits updatedShardLimits = new HealthMetadata.ShardLimits( - randomIntBetween(3000, 4000), - randomIntBetween(4001, 5000) - ); + randomBytes = ByteSizeValue.ofBytes(randomLongBetween(101, 200)); + String updatedLowWatermark = percentageMode ? randomIntBetween(40, 59) + "%" : randomBytes.toString(); + ByteSizeValue updatedLowMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; + randomBytes = ByteSizeValue.ofBytes(randomLongBetween(50, 100)); + String updatedHighWatermark = percentageMode ? randomIntBetween(60, 90) + "%" : randomBytes.toString(); + ByteSizeValue updatedHighMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; + randomBytes = ByteSizeValue.ofBytes(randomLongBetween(5, 10)); + String updatedFloodStageWatermark = percentageMode ? randomIntBetween(91, 95) + "%" : randomBytes.toString(); + ByteSizeValue updatedFloodStageMaxHeadroom = percentageMode ? 
randomBytes : ByteSizeValue.MINUS_ONE; + HealthMetadata.ShardLimits updatedShardLimits = new HealthMetadata.ShardLimits( + randomIntBetween(3000, 4000), + randomIntBetween(4001, 5000) + ); - ensureStableCluster(numberOfNodes); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(initialWatermark)); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(initialMaxHeadroom)); + ensureStableCluster(numberOfNodes); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(initialWatermark)); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(initialMaxHeadroom)); - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, initialShardLimits); - } - var settingsBuilder = Settings.builder() - .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), updatedLowWatermark) - .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), updatedHighWatermark) - .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), updatedFloodStageWatermark) - .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), updatedShardLimits.maxShardsPerNode()) - .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE_FROZEN.getKey(), updatedShardLimits.maxShardsPerNodeFrozen()); - - if (percentageMode) { - settingsBuilder.put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), updatedLowMaxHeadroom) - .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING.getKey(), updatedHighMaxHeadroom) - .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING.getKey(), updatedFloodStageMaxHeadroom); - } - updateSettings(internalCluster, settingsBuilder); - - assertBusy(() -> { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); - assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); - assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, updatedShardLimits); - }); - - var electedMaster = internalCluster.getMasterName(); - - // Force a master fail-over but, since the settings were manually changed, we should return the manually set values - internalCluster.stopNode(electedMaster); - ensureStableCluster(numberOfNodes - 1); - - assertBusy(() -> { - var healthMetadata = HealthMetadata.getFromClusterState( - internalCluster.clusterService(internalCluster.getMasterName()).state() - ); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); - assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); - assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - 
assertEquals(shardLimitsMetadata, updatedShardLimits); - }); - - // restart the whole cluster - internalCluster.fullRestart(); - ensureStableCluster(internalCluster.numDataAndMasterNodes()); - String electedMasterAfterRestart = internalCluster.getMasterName(); - assertBusy(() -> { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService(electedMasterAfterRestart).state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); - assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); - assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); - - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, updatedShardLimits); - }); + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, initialShardLimits); + } + var settingsBuilder = Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), updatedLowWatermark) + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), updatedHighWatermark) + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), updatedFloodStageWatermark) + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), updatedShardLimits.maxShardsPerNode()) + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE_FROZEN.getKey(), updatedShardLimits.maxShardsPerNodeFrozen()); + + if (percentageMode) { + settingsBuilder.put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), updatedLowMaxHeadroom) + .put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING.getKey(), updatedHighMaxHeadroom) + .put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING.getKey(), updatedFloodStageMaxHeadroom); } + updateSettings(internalCluster, settingsBuilder); + + assertBusy(() -> { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); + assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); + assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); + + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, updatedShardLimits); + }); + + var electedMaster = internalCluster.getMasterName(); + + // Force a master fail-over but, since the settings were manually changed, we should return the manually set values + internalCluster.stopNode(electedMaster); + ensureStableCluster(numberOfNodes - 1); + + assertBusy(() -> { + var healthMetadata = HealthMetadata.getFromClusterState( + internalCluster.clusterService(internalCluster.getMasterName()).state() + ); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); + assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); + assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); + + var shardLimitsMetadata = 
healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, updatedShardLimits); + }); + + // restart the whole cluster + internalCluster.fullRestart(); + ensureStableCluster(internalCluster.numDataAndMasterNodes()); + String electedMasterAfterRestart = internalCluster.getMasterName(); + assertBusy(() -> { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService(electedMasterAfterRestart).state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(updatedHighWatermark)); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(updatedHighMaxHeadroom)); + assertThat(diskMetadata.describeFloodStageWatermark(), equalTo(updatedFloodStageWatermark)); + assertThat(diskMetadata.floodStageMaxHeadroom(), equalTo(updatedFloodStageMaxHeadroom)); + + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, updatedShardLimits); + }); } public void testHealthNodeToggleEnabled() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - int numberOfNodes = 3; - Map<String, String> watermarkByNode = new HashMap<>(); - Map<String, ByteSizeValue> maxHeadroomByNode = new HashMap<>(); - Map<String, HealthMetadata.ShardLimits> shardLimitsPerNode = new HashMap<>(); - for (int i = 0; i < numberOfNodes; i++) { - ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); - String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); - ByteSizeValue customMaxHeadroom = percentageMode ? randomBytes : ByteSizeValue.MINUS_ONE; - var customShardLimits = new HealthMetadata.ShardLimits(randomIntBetween(1, 1000), randomIntBetween(1001, 2000)); - String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString(), customShardLimits); - watermarkByNode.put(nodeName, customWatermark); - maxHeadroomByNode.put(nodeName, customMaxHeadroom); - shardLimitsPerNode.put(nodeName, customShardLimits); - } + final InternalTestCluster internalCluster = internalCluster(); + int numberOfNodes = 3; + Map<String, String> watermarkByNode = new HashMap<>(); + Map<String, ByteSizeValue> maxHeadroomByNode = new HashMap<>(); + Map<String, HealthMetadata.ShardLimits> shardLimitsPerNode = new HashMap<>(); + for (int i = 0; i < numberOfNodes; i++) { + ByteSizeValue randomBytes = ByteSizeValue.ofBytes(randomLongBetween(6, 19)); + String customWatermark = percentageMode ? randomIntBetween(86, 94) + "%" : randomBytes.toString(); + ByteSizeValue customMaxHeadroom = percentageMode ?
randomBytes : ByteSizeValue.MINUS_ONE; + var customShardLimits = new HealthMetadata.ShardLimits(randomIntBetween(1, 1000), randomIntBetween(1001, 2000)); + String nodeName = startNode(internalCluster, customWatermark, customMaxHeadroom.toString(), customShardLimits); + watermarkByNode.put(nodeName, customWatermark); + maxHeadroomByNode.put(nodeName, customMaxHeadroom); + shardLimitsPerNode.put(nodeName, customShardLimits); + } - String electedMaster = internalCluster.getMasterName(); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); + String electedMaster = internalCluster.getMasterName(); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); - } + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); + } - // toggle the health metadata service so we can check that the posted settings are still from the master node - updateSettings(internalCluster, Settings.builder().put(HealthNodeTaskExecutor.ENABLED_SETTING.getKey(), false)); + // toggle the health metadata service so we can check that the posted settings are still from the master node + updateSettings(internalCluster, Settings.builder().put(HealthNodeTaskExecutor.ENABLED_SETTING.getKey(), false)); - updateSettings(internalCluster, Settings.builder().put(HealthNodeTaskExecutor.ENABLED_SETTING.getKey(), true)); + updateSettings(internalCluster, Settings.builder().put(HealthNodeTaskExecutor.ENABLED_SETTING.getKey(), true)); - electedMaster = internalCluster.getMasterName(); - ensureStableCluster(numberOfNodes); - { - var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); - var diskMetadata = healthMetadata.getDiskMetadata(); - assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); - assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); + electedMaster = internalCluster.getMasterName(); + ensureStableCluster(numberOfNodes); + { + var healthMetadata = HealthMetadata.getFromClusterState(internalCluster.clusterService().state()); + var diskMetadata = healthMetadata.getDiskMetadata(); + assertThat(diskMetadata.describeHighWatermark(), equalTo(watermarkByNode.get(electedMaster))); + assertThat(diskMetadata.highMaxHeadroom(), equalTo(maxHeadroomByNode.get(electedMaster))); - var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); - assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); - } + var shardLimitsMetadata = healthMetadata.getShardLimitsMetadata(); + assertEquals(shardLimitsMetadata, shardLimitsPerNode.get(electedMaster)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java index 2e741d6691d24..14697cc6533c1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java @@ -52,35 +52,34 @@ protected Settings nodeSettings(int ordinal, Settings otherSettings) { } public void testThatHealthNodeDataIsFetchedAndPassedToIndicators() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - ensureStableCluster(internalCluster.getNodeNames().length); - waitForAllNodesToReportHealth(); - for (String node : internalCluster.getNodeNames()) { - HealthService healthService = internalCluster.getInstance(HealthService.class, node); - AtomicBoolean onResponseCalled = new AtomicBoolean(false); - ActionListener<List<HealthIndicatorResult>> listener = new ActionListener<>() { - @Override - public void onResponse(List<HealthIndicatorResult> resultList) { - /* - * The following is really just asserting that the TestHealthIndicatorService's calculate method was called. The - * assertions that it actually got the HealthInfo data are in the calculate method of TestHealthIndicatorService. - */ - assertNotNull(resultList); - assertThat(resultList.size(), equalTo(1)); - HealthIndicatorResult testIndicatorResult = resultList.get(0); - assertThat(testIndicatorResult.status(), equalTo(HealthStatus.RED)); - assertThat(testIndicatorResult.symptom(), equalTo(TestHealthIndicatorService.SYMPTOM)); - onResponseCalled.set(true); - } - - @Override - public void onFailure(Exception e) { - throw new RuntimeException(e); - } - }; - healthService.getHealth(internalCluster.client(node), TestHealthIndicatorService.NAME, true, 1000, listener); - assertBusy(() -> assertThat(onResponseCalled.get(), equalTo(true))); - } + final InternalTestCluster internalCluster = internalCluster(); + ensureStableCluster(internalCluster.getNodeNames().length); + waitForAllNodesToReportHealth(); + for (String node : internalCluster.getNodeNames()) { + HealthService healthService = internalCluster.getInstance(HealthService.class, node); + AtomicBoolean onResponseCalled = new AtomicBoolean(false); + ActionListener<List<HealthIndicatorResult>> listener = new ActionListener<>() { + @Override + public void onResponse(List<HealthIndicatorResult> resultList) { + /* + * The following is really just asserting that the TestHealthIndicatorService's calculate method was called. The + * assertions that it actually got the HealthInfo data are in the calculate method of TestHealthIndicatorService.
+ */ + assertNotNull(resultList); + assertThat(resultList.size(), equalTo(1)); + HealthIndicatorResult testIndicatorResult = resultList.get(0); + assertThat(testIndicatorResult.status(), equalTo(HealthStatus.RED)); + assertThat(testIndicatorResult.symptom(), equalTo(TestHealthIndicatorService.SYMPTOM)); + onResponseCalled.set(true); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }; + healthService.getHealth(internalCluster.client(node), TestHealthIndicatorService.NAME, true, 1000, listener); + assertBusy(() -> assertThat(onResponseCalled.get(), equalTo(true))); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java index 88852b3e00f23..02816688f1bbb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.junit.annotations.TestLogging; -import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Map; @@ -35,78 +34,66 @@ public class UpdateHealthInfoCacheIT extends ESIntegTestCase { private static final DiskHealthInfo GREEN = new DiskHealthInfo(HealthStatus.GREEN, null); public void testNodesReportingHealth() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - decreasePollingInterval(internalCluster); - String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); - DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster); - assertThat(healthNode, notNullValue()); - assertBusy(() -> assertResultsCanBeFetched(internalCluster, healthNode, List.of(nodeIds), null)); - } catch (IOException e) { - throw new RuntimeException("Failed to close internal cluster: " + e.getMessage(), e); - } + final InternalTestCluster internalCluster = internalCluster(); + decreasePollingInterval(internalCluster); + String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); + DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster); + assertThat(healthNode, notNullValue()); + assertBusy(() -> assertResultsCanBeFetched(internalCluster, healthNode, List.of(nodeIds), null)); } public void testNodeLeavingCluster() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - decreasePollingInterval(internalCluster); - Collection<DiscoveryNode> nodes = getNodes(internalCluster).values(); - DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster); - assertThat(healthNode, notNullValue()); - DiscoveryNode nodeToLeave = nodes.stream().filter(node -> { - boolean isMaster = node.getName().equals(internalCluster.getMasterName()); - boolean isHealthNode = node.getId().equals(healthNode.getId()); - // We have dedicated tests for master and health node - return isMaster == false && isHealthNode == false; - }).findAny().orElseThrow(); - internalCluster.stopNode(nodeToLeave.getName()); - assertBusy( - () -> assertResultsCanBeFetched( - internalCluster, - healthNode, - nodes.stream().filter(node -> node.equals(nodeToLeave) == false).map(DiscoveryNode::getId).toList(), - nodeToLeave.getId() - ) - ); - } catch (IOException e) { - throw new RuntimeException("Failed to close internal cluster: " + e.getMessage(), e); - } + final InternalTestCluster internalCluster =
internalCluster(); + decreasePollingInterval(internalCluster); + Collection<DiscoveryNode> nodes = getNodes(internalCluster).values(); + DiscoveryNode healthNode = waitAndGetHealthNode(internalCluster); + assertThat(healthNode, notNullValue()); + DiscoveryNode nodeToLeave = nodes.stream().filter(node -> { + boolean isMaster = node.getName().equals(internalCluster.getMasterName()); + boolean isHealthNode = node.getId().equals(healthNode.getId()); + // We have dedicated tests for master and health node + return isMaster == false && isHealthNode == false; + }).findAny().orElseThrow(); + internalCluster.stopNode(nodeToLeave.getName()); + assertBusy( + () -> assertResultsCanBeFetched( + internalCluster, + healthNode, + nodes.stream().filter(node -> node.equals(nodeToLeave) == false).map(DiscoveryNode::getId).toList(), + nodeToLeave.getId() + ) + ); } @TestLogging(value = "org.elasticsearch.health.node:DEBUG", reason = "https://github.com/elastic/elasticsearch/issues/97213") public void testHealthNodeFailOver() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - decreasePollingInterval(internalCluster); - String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); - DiscoveryNode healthNodeToBeShutDown = waitAndGetHealthNode(internalCluster); - assertThat(healthNodeToBeShutDown, notNullValue()); - internalCluster.restartNode(healthNodeToBeShutDown.getName()); - ensureStableCluster(nodeIds.length); - DiscoveryNode newHealthNode = waitAndGetHealthNode(internalCluster); - assertThat(newHealthNode, notNullValue()); - logger.info("Previous health node {}, new health node {}.", healthNodeToBeShutDown, newHealthNode); - assertBusy(() -> assertResultsCanBeFetched(internalCluster, newHealthNode, List.of(nodeIds), null)); - } catch (IOException e) { - throw new RuntimeException("Failed to close internal cluster: " + e.getMessage(), e); - } + final InternalTestCluster internalCluster = internalCluster(); + decreasePollingInterval(internalCluster); + String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); + DiscoveryNode healthNodeToBeShutDown = waitAndGetHealthNode(internalCluster); + assertThat(healthNodeToBeShutDown, notNullValue()); + internalCluster.restartNode(healthNodeToBeShutDown.getName()); + ensureStableCluster(nodeIds.length); + DiscoveryNode newHealthNode = waitAndGetHealthNode(internalCluster); + assertThat(newHealthNode, notNullValue()); + logger.info("Previous health node {}, new health node {}.", healthNodeToBeShutDown, newHealthNode); + assertBusy(() -> assertResultsCanBeFetched(internalCluster, newHealthNode, List.of(nodeIds), null)); } @TestLogging(value = "org.elasticsearch.health.node:DEBUG", reason = "https://github.com/elastic/elasticsearch/issues/97213") public void testMasterFailure() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - decreasePollingInterval(internalCluster); - String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); - DiscoveryNode healthNodeBeforeIncident = waitAndGetHealthNode(internalCluster); - assertThat(healthNodeBeforeIncident, notNullValue()); - String masterName = internalCluster.getMasterName(); - logger.info("Restarting elected master node {}.", masterName); - internalCluster.restartNode(masterName); - ensureStableCluster(nodeIds.length); - DiscoveryNode newHealthNode = waitAndGetHealthNode(internalCluster); - assertThat(newHealthNode, notNullValue()); - assertBusy(() -> assertResultsCanBeFetched(internalCluster, newHealthNode,
List.of(nodeIds), null)); - } catch (IOException e) { - throw new RuntimeException("Failed to close internal cluster: " + e.getMessage(), e); - } + final InternalTestCluster internalCluster = internalCluster(); + decreasePollingInterval(internalCluster); + String[] nodeIds = getNodes(internalCluster).keySet().toArray(new String[0]); + DiscoveryNode healthNodeBeforeIncident = waitAndGetHealthNode(internalCluster); + assertThat(healthNodeBeforeIncident, notNullValue()); + String masterName = internalCluster.getMasterName(); + logger.info("Restarting elected master node {}.", masterName); + internalCluster.restartNode(masterName); + ensureStableCluster(nodeIds.length); + DiscoveryNode newHealthNode = waitAndGetHealthNode(internalCluster); + assertThat(newHealthNode, notNullValue()); + assertBusy(() -> assertResultsCanBeFetched(internalCluster, newHealthNode, List.of(nodeIds), null)); } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java index 1cab207fda30c..2f9ef3c80d23c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; import java.util.List; import java.util.Map; @@ -30,41 +29,39 @@ public class DiskHealthIndicatorServiceIT extends ESIntegTestCase { public void testGreen() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - internalCluster.startMasterOnlyNode(); - internalCluster.startDataOnlyNode(); - ensureStableCluster(internalCluster.getNodeNames().length); - waitForAllNodesToReportHealth(); - for (String node : internalCluster.getNodeNames()) { - HealthService healthService = internalCluster.getInstance(HealthService.class, node); - List<HealthIndicatorResult> resultList = getHealthServiceResults(healthService, node); - assertNotNull(resultList); - assertThat(resultList.size(), equalTo(1)); - HealthIndicatorResult testIndicatorResult = resultList.get(0); - assertThat(testIndicatorResult.status(), equalTo(HealthStatus.GREEN)); - assertThat(testIndicatorResult.symptom(), equalTo("The cluster has enough available disk space.")); - } + final var internalCluster = internalCluster(); + internalCluster.startMasterOnlyNode(); + internalCluster.startDataOnlyNode(); + ensureStableCluster(internalCluster.getNodeNames().length); + waitForAllNodesToReportHealth(); + for (String node : internalCluster.getNodeNames()) { + HealthService healthService = internalCluster.getInstance(HealthService.class, node); + List<HealthIndicatorResult> resultList = getHealthServiceResults(healthService, node); + assertNotNull(resultList); + assertThat(resultList.size(), equalTo(1)); + HealthIndicatorResult testIndicatorResult = resultList.get(0); + assertThat(testIndicatorResult.status(), equalTo(HealthStatus.GREEN)); + assertThat(testIndicatorResult.symptom(), equalTo("The cluster has enough available disk space.")); } } public void testRed() throws Exception { - try (InternalTestCluster internalCluster = internalCluster()) { - internalCluster.startMasterOnlyNode(getVeryLowWatermarksSettings()); - internalCluster.startDataOnlyNode(getVeryLowWatermarksSettings()); -
ensureStableCluster(internalCluster.getNodeNames().length); - waitForAllNodesToReportHealth(); - for (String node : internalCluster.getNodeNames()) { - HealthService healthService = internalCluster.getInstance(HealthService.class, node); - List<HealthIndicatorResult> resultList = getHealthServiceResults(healthService, node); - assertNotNull(resultList); - assertThat(resultList.size(), equalTo(1)); - HealthIndicatorResult testIndicatorResult = resultList.get(0); - assertThat(testIndicatorResult.status(), equalTo(HealthStatus.RED)); - assertThat( - testIndicatorResult.symptom(), - equalTo("2 nodes with roles: [data, master] are out of disk or running low on disk space.") - ); - } + final var internalCluster = internalCluster(); + internalCluster.startMasterOnlyNode(getVeryLowWatermarksSettings()); + internalCluster.startDataOnlyNode(getVeryLowWatermarksSettings()); + ensureStableCluster(internalCluster.getNodeNames().length); + waitForAllNodesToReportHealth(); + for (String node : internalCluster.getNodeNames()) { + HealthService healthService = internalCluster.getInstance(HealthService.class, node); + List<HealthIndicatorResult> resultList = getHealthServiceResults(healthService, node); + assertNotNull(resultList); + assertThat(resultList.size(), equalTo(1)); + HealthIndicatorResult testIndicatorResult = resultList.get(0); + assertThat(testIndicatorResult.status(), equalTo(HealthStatus.RED)); + assertThat( + testIndicatorResult.symptom(), + equalTo("2 nodes with roles: [data, master] are out of disk or running low on disk space.") + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java index 24372978834c6..443d0c384a058 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -39,6 +38,7 @@ import java.util.Objects; import java.util.function.BiConsumer; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -73,7 +73,7 @@ public void testFinalPipelineCantChangeDestination() { final IllegalStateException e = expectThrows( IllegalStateException.class, - () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); assertThat( e, @@ -93,7 +93,7 @@ public void testFinalPipelineCantRerouteDestination() { final IllegalStateException e = expectThrows( IllegalStateException.class, - () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); assertThat( e, @@ -118,15 +118,15 @@ public void testFinalPipelineOfOldDestinationIsNotInvoked() { {"processors": [{"final": {"exists":"no_such_field"}}]}"""); clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody,
XContentType.JSON)).actionGet(); - DocWriteResponse indexResponse = client().prepareIndex("index") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertEquals(RestStatus.CREATED, indexResponse.status()); - SearchResponse target = prepareSearch("target").get(); - assertEquals(1, target.getHits().getTotalHits().value); - assertFalse(target.getHits().getAt(0).getSourceAsMap().containsKey("final")); + assertResponse(prepareSearch("target"), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertFalse(response.getHits().getAt(0).getSourceAsMap().containsKey("final")); + }); } public void testFinalPipelineOfNewDestinationIsInvoked() { @@ -144,15 +144,15 @@ public void testFinalPipelineOfNewDestinationIsInvoked() { {"processors": [{"final": {}}]}"""); clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); - DocWriteResponse indexResponse = client().prepareIndex("index") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertEquals(RestStatus.CREATED, indexResponse.status()); - SearchResponse target = prepareSearch("target").get(); - assertEquals(1, target.getHits().getTotalHits().value); - assertEquals(true, target.getHits().getAt(0).getSourceAsMap().get("final")); + assertResponse(prepareSearch("target"), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertEquals(true, response.getHits().getAt(0).getSourceAsMap().get("final")); + }); } public void testDefaultPipelineOfNewDestinationIsNotInvoked() { @@ -170,15 +170,15 @@ public void testDefaultPipelineOfNewDestinationIsNotInvoked() { {"processors": [{"final": {}}]}"""); clusterAdmin().putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)).actionGet(); - DocWriteResponse indexResponse = client().prepareIndex("index") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertEquals(RestStatus.CREATED, indexResponse.status()); - SearchResponse target = prepareSearch("target").get(); - assertEquals(1, target.getHits().getTotalHits().value); - assertFalse(target.getHits().getAt(0).getSourceAsMap().containsKey("final")); + assertResponse(prepareSearch("target"), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertFalse(response.getHits().getAt(0).getSourceAsMap().containsKey("final")); + }); } public void testDefaultPipelineOfRerouteDestinationIsInvoked() { @@ -196,15 +196,15 @@ public void testDefaultPipelineOfRerouteDestinationIsInvoked() { {"processors": [{"final": {}}]}"""); clusterAdmin().putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)).actionGet(); - DocWriteResponse indexResponse = client().prepareIndex("index") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertEquals(RestStatus.CREATED, indexResponse.status()); - SearchResponse target = prepareSearch("target").get(); - assertEquals(1, target.getHits().getTotalHits().value); - 
assertTrue(target.getHits().getAt(0).getSourceAsMap().containsKey("final")); + assertResponse(prepareSearch("target"), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertTrue(response.getHits().getAt(0).getSourceAsMap().containsKey("final")); + }); } public void testAvoidIndexingLoop() { @@ -224,8 +224,7 @@ public void testAvoidIndexingLoop() { IllegalStateException exception = expectThrows( IllegalStateException.class, - () -> client().prepareIndex("index") - .setId("1") + () -> prepareIndex("index").setId("1") .setSource(Map.of("dest", "index")) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get() @@ -243,7 +242,7 @@ public void testFinalPipeline() { // this asserts that the final_pipeline was used, without us having to actually create the pipeline etc. final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); assertThat(e, hasToString(containsString("pipeline with id [final_pipeline] does not exist"))); } @@ -257,7 +256,7 @@ public void testRequestPipelineAndFinalPipeline() { clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final Settings settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("index", settings); - final IndexRequestBuilder index = client().prepareIndex("index").setId("1"); + final IndexRequestBuilder index = prepareIndex("index").setId("1"); index.setSource(Map.of("field", "value")); index.setPipeline("request_pipeline"); index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); @@ -285,7 +284,7 @@ public void testDefaultAndFinalPipeline() { .put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline") .build(); createIndex("index", settings); - final IndexRequestBuilder index = client().prepareIndex("index").setId("1"); + final IndexRequestBuilder index = prepareIndex("index").setId("1"); index.setSource(Map.of("field", "value")); index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); final DocWriteResponse response = index.get(); @@ -332,7 +331,7 @@ public void testDefaultAndFinalPipelineFromTemplates() { .setOrder(finalPipelineOrder) .setSettings(finalPipelineSettings) .get(); - final IndexRequestBuilder index = client().prepareIndex("index").setId("1"); + final IndexRequestBuilder index = prepareIndex("index").setId("1"); index.setSource(Map.of("field", "value")); index.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); final DocWriteResponse response = index.get(); @@ -370,7 +369,7 @@ public void testHighOrderFinalPipelinePreferred() throws IOException { // this asserts that the high_order_final_pipeline was selected, without us having to actually create the pipeline etc. 
final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); assertThat(e, hasToString(containsString("pipeline with id [high_order_final_pipeline] does not exist"))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java index 41bdf944edd59..14d9cf9e56eae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -27,6 +26,7 @@ import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -37,40 +37,51 @@ public class HiddenIndexIT extends ESIntegTestCase { public void testHiddenIndexSearch() { assertAcked(indicesAdmin().prepareCreate("hidden-index").setSettings(Settings.builder().put("index.hidden", true).build()).get()); - client().prepareIndex("hidden-index").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("hidden-index").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); // default not visible to wildcard expansion - SearchResponse searchResponse = prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000) - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - boolean matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); - assertFalse(matchedHidden); + assertResponse( + prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()), + response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); + assertFalse(matchedHidden); + } + ); // direct access allowed - searchResponse = prepareSearch("hidden-index").setSize(1000).setQuery(QueryBuilders.matchAllQuery()).get(); - matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); - assertTrue(matchedHidden); + assertResponse(prepareSearch("hidden-index").setSize(1000).setQuery(QueryBuilders.matchAllQuery()), response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); + assertTrue(matchedHidden); + }); // with indices option to include hidden - searchResponse = prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000) - .setQuery(QueryBuilders.matchAllQuery()) - .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN) - .get(); - 
matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); - assertTrue(matchedHidden); + assertResponse( + prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000) + .setQuery(QueryBuilders.matchAllQuery()) + .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN), + response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); + assertTrue(matchedHidden); + } + ); // implicit based on use of pattern starting with . and a wildcard assertAcked(indicesAdmin().prepareCreate(".hidden-index").setSettings(Settings.builder().put("index.hidden", true).build()).get()); - client().prepareIndex(".hidden-index").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); - searchResponse = prepareSearch(randomFrom(".*", ".hidden-*")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()).get(); - matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> ".hidden-index".equals(hit.getIndex())); - assertTrue(matchedHidden); + prepareIndex(".hidden-index").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + assertResponse(prepareSearch(randomFrom(".*", ".hidden-*")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()), response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> ".hidden-index".equals(hit.getIndex())); + assertTrue(matchedHidden); + }); // make index not hidden updateIndexSettings(Settings.builder().put("index.hidden", false), "hidden-index"); - searchResponse = prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()).get(); - matchedHidden = Arrays.stream(searchResponse.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); - assertTrue(matchedHidden); + assertResponse( + prepareSearch(randomFrom("*", "_all", "h*", "*index")).setSize(1000).setQuery(QueryBuilders.matchAllQuery()), + response -> { + boolean matchedHidden = Arrays.stream(response.getHits().getHits()).anyMatch(hit -> "hidden-index".equals(hit.getIndex())); + assertTrue(matchedHidden); + } + ); } public void testGlobalTemplatesDoNotApply() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexRequestBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexRequestBuilderIT.java index 31368a3cfb8fd..71f0c75efa026 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexRequestBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexRequestBuilderIT.java @@ -28,13 +28,12 @@ public void testSetSource() throws InterruptedException, ExecutionException { Map<String, Object> map = new HashMap<>(); map.put("test_field", "foobar"); IndexRequestBuilder[] builders = new IndexRequestBuilder[] { - client().prepareIndex("test").setSource((Object) "test_field", (Object) "foobar"), - client().prepareIndex("test").setSource("{\"test_field\" : \"foobar\"}", XContentType.JSON), - client().prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), XContentType.JSON), - client().prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), XContentType.JSON), - client().prepareIndex("test") - .setSource(BytesReference.toBytes(new BytesArray("{\"test_field\" : \"foobar\"}")), XContentType.JSON), - client().prepareIndex("test").setSource(map) }; + prepareIndex("test").setSource((Object) "test_field",
(Object) "foobar"), + prepareIndex("test").setSource("{\"test_field\" : \"foobar\"}", XContentType.JSON), + prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), XContentType.JSON), + prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), XContentType.JSON), + prepareIndex("test").setSource(BytesReference.toBytes(new BytesArray("{\"test_field\" : \"foobar\"}")), XContentType.JSON), + prepareIndex("test").setSource(map) }; indexRandom(true, builders); ElasticsearchAssertions.assertHitCount( prepareSearch("test").setQuery(QueryBuilders.termQuery("test_field", "foobar")), @@ -44,7 +43,7 @@ public void testSetSource() throws InterruptedException, ExecutionException { public void testOddNumberOfSourceObjects() { try { - client().prepareIndex("test").setSource("test_field", "foobar", new Object()); + prepareIndex("test").setSource("test_field", "foobar", new Object()); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("The number of object passed must be even but was [3]")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java index 9d94b9d0b41b0..fae08f8d5577e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java @@ -70,8 +70,7 @@ public void testIndexSort() { .putList("index.sort.field", "date", "numeric_dv", "keyword_dv") ).setMapping(TEST_MAPPING).get(); for (int i = 0; i < 20; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource("numeric_dv", randomInt(), "keyword_dv", randomAlphaOfLengthBetween(10, 20)) .get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java index ec03a740f8ade..ca2d2c60e23e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/WaitUntilRefreshIT.java @@ -56,11 +56,7 @@ public void createTestIndex() { } public void testIndex() { - DocWriteResponse index = client().prepareIndex("test") - .setId("1") - .setSource("foo", "bar") - .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) - .get(); + DocWriteResponse index = prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); assertEquals(RestStatus.CREATED, index.status()); assertFalse("request shouldn't have forced a refresh", index.forcedRefresh()); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); @@ -68,7 +64,7 @@ public void testIndex() { public void testDelete() throws InterruptedException, ExecutionException { // Index normally - indexRandom(true, client().prepareIndex("test").setId("1").setSource("foo", "bar")); + indexRandom(true, prepareIndex("test").setId("1").setSource("foo", "bar")); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); // Now delete with blockUntilRefresh @@ -80,7 +76,7 @@ public void testDelete() throws InterruptedException, ExecutionException { public void testUpdate() throws InterruptedException, ExecutionException { // Index normally - indexRandom(true, client().prepareIndex("test").setId("1").setSource("foo", "bar")); + 
indexRandom(true, prepareIndex("test").setId("1").setSource("foo", "bar")); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); // Update with RefreshPolicy.WAIT_UNTIL @@ -115,7 +111,7 @@ public void testUpdate() throws InterruptedException, ExecutionException { public void testBulk() { // Index by bulk with RefreshPolicy.WAIT_UNTIL BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); - bulk.add(client().prepareIndex("test").setId("1").setSource("foo", "bar")); + bulk.add(prepareIndex("test").setId("1").setSource("foo", "bar")); assertBulkSuccess(bulk.get()); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); @@ -143,16 +139,16 @@ public void testBulk() { */ public void testNoRefreshInterval() throws InterruptedException, ExecutionException { updateIndexSettings(Settings.builder().put("index.refresh_interval", -1), "test"); - ActionFuture<DocWriteResponse> index = client().prepareIndex("test") - .setId("1") + ActionFuture<DocWriteResponse> index = prepareIndex("test").setId("1") .setSource("foo", "bar") .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) .execute(); while (false == index.isDone()) { indicesAdmin().prepareRefresh("test").get(); } - assertEquals(RestStatus.CREATED, index.get().status()); - assertFalse("request shouldn't have forced a refresh", index.get().forcedRefresh()); + var response = index.get(); + assertEquals(RestStatus.CREATED, response.status()); + assertFalse("request shouldn't have forced a refresh", response.forcedRefresh()); assertSearchHits(prepareSearch("test").setQuery(matchQuery("foo", "bar")), "1"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java index 71ae1704b5fed..b7d4ce18b15fe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java @@ -41,7 +41,7 @@ public void testMergesHappening() throws Exception { .source(jsonBuilder().startObject().field("l", randomLong()).endObject()) ); } - BulkResponse response = request.execute().actionGet(); + BulkResponse response = request.get(); refresh(); assertNoFailures(response); IndicesStatsResponse stats = indicesAdmin().prepareStats("test").setSegments(true).setMerge(true).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java index f297b61e7087d..ee165d1870571 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.engine; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -21,7 +20,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.xcontent.XContentType; import org.junit.After; import org.junit.Before; @@ -32,6 +30,8 @@ import
java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -108,24 +108,20 @@ public void testMaxDocsLimit() throws Exception { ); assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(0) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) maxDocs.get())); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(new MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), + response -> assertThat(response.getHits().getTotalHits().value, equalTo((long) maxDocs.get())) + ); if (randomBoolean()) { indicesAdmin().prepareFlush("test").get(); } internalCluster().fullRestart(); internalCluster().ensureAtLeastNumDataNodes(2); ensureGreen("test"); - searchResponse = prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(0) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) maxDocs.get())); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(new MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), + response -> assertThat(response.getHits().getTotalHits().value, equalTo((long) maxDocs.get())) + ); } public void testMaxDocsLimitConcurrently() throws Exception { @@ -135,12 +131,10 @@ public void testMaxDocsLimitConcurrently() throws Exception { assertThat(indexingResult.numFailures, greaterThan(0)); assertThat(indexingResult.numSuccess, both(greaterThan(0)).and(lessThanOrEqualTo(maxDocs.get()))); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(0) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) indexingResult.numSuccess)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(new MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), + indexingResult.numSuccess + ); int totalSuccess = indexingResult.numSuccess; while (totalSuccess < maxDocs.get()) { indexingResult = indexDocs(between(1, 10), between(1, 8)); @@ -152,12 +146,10 @@ public void testMaxDocsLimitConcurrently() throws Exception { assertThat(indexingResult.numSuccess, equalTo(0)); } indicesAdmin().prepareRefresh("test").get(); - searchResponse = prepareSearch("test").setQuery(new MatchAllQueryBuilder()) - .setTrackTotalHitsUpTo(Integer.MAX_VALUE) - .setSize(0) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) totalSuccess)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(new 
MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), + totalSuccess + ); } record IndexingResult(int numSuccess, int numFailures) {} @@ -173,7 +165,7 @@ static IndexingResult indexDocs(int numRequests, int numThreads) throws Exceptio phaser.arriveAndAwaitAdvance(); while (completedRequests.incrementAndGet() <= numRequests) { try { - final DocWriteResponse resp = client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + final DocWriteResponse resp = prepareIndex("test").setSource("{}", XContentType.JSON).get(); numSuccess.incrementAndGet(); assertThat(resp.status(), equalTo(RestStatus.CREATED)); } catch (IllegalArgumentException e) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java index 334462f3b757d..55e90d4398201 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/fielddata/FieldDataLoadingIT.java @@ -35,7 +35,7 @@ public void testEagerGlobalOrdinalsFieldDataLoading() throws Exception { ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("name", "name").get(); + prepareIndex("test").setId("1").setSource("name", "name").get(); indicesAdmin().prepareRefresh("test").get(); ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java index 9fd1e788eca8c..c1f06aeceebde 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/CopyToMapperIntegrationIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -22,6 +21,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -32,25 +32,25 @@ public void testDynamicTemplateCopyTo() throws Exception { int recordCount = between(1, 200); for (int i = 0; i < recordCount * 2; i++) { - client().prepareIndex("test-idx").setId(Integer.toString(i)).setSource("test_field", "test " + i, "even", i % 2 == 0).get(); + prepareIndex("test-idx").setId(Integer.toString(i)).setSource("test_field", "test " + i, "even", i % 2 == 0).get(); } - indicesAdmin().prepareRefresh("test-idx").execute().actionGet(); + indicesAdmin().prepareRefresh("test-idx").get(); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - SearchResponse response = prepareSearch("test-idx").setQuery(QueryBuilders.termQuery("even", true)) - .addAggregation(AggregationBuilders.terms("test").field("test_field").size(recordCount * 2).collectMode(aggCollectionMode)) - .addAggregation( - AggregationBuilders.terms("test_raw").field("test_field_raw").size(recordCount 
* 2).collectMode(aggCollectionMode) - ) - .execute() - .actionGet(); - - assertThat(response.getHits().getTotalHits().value, equalTo((long) recordCount)); - - assertThat(((Terms) response.getAggregations().get("test")).getBuckets().size(), equalTo(recordCount + 1)); - assertThat(((Terms) response.getAggregations().get("test_raw")).getBuckets().size(), equalTo(recordCount)); - + assertResponse( + prepareSearch("test-idx").setQuery(QueryBuilders.termQuery("even", true)) + .addAggregation(AggregationBuilders.terms("test").field("test_field").size(recordCount * 2).collectMode(aggCollectionMode)) + .addAggregation( + AggregationBuilders.terms("test_raw").field("test_field_raw").size(recordCount * 2).collectMode(aggCollectionMode) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) recordCount)); + + assertThat(((Terms) response.getAggregations().get("test")).getBuckets().size(), equalTo(recordCount + 1)); + assertThat(((Terms) response.getAggregations().get("test_raw")).getBuckets().size(), equalTo(recordCount)); + } + ); } public void testDynamicObjectCopyTo() throws Exception { @@ -65,8 +65,8 @@ public void testDynamicObjectCopyTo() throws Exception { .endObject() ); assertAcked(indicesAdmin().prepareCreate("test-idx").setMapping(mapping)); - client().prepareIndex("test-idx").setId("1").setSource("foo", "bar").get(); - indicesAdmin().prepareRefresh("test-idx").execute().actionGet(); + prepareIndex("test-idx").setId("1").setSource("foo", "bar").get(); + indicesAdmin().prepareRefresh("test-idx").get(); assertHitCount(prepareSearch("test-idx").setQuery(QueryBuilders.termQuery("root.top.child", "bar")), 1L); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 38349e14bdf05..868540ac3e3f8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -64,9 +64,9 @@ protected Collection> nodePlugins() { public void testConflictingDynamicMappings() { // we don't use indexRandom because the order of requests is important here createIndex("index"); - client().prepareIndex("index").setId("1").setSource("foo", 3).get(); + prepareIndex("index").setId("1").setSource("foo", 3).get(); try { - client().prepareIndex("index").setId("2").setSource("foo", "bar").get(); + prepareIndex("index").setId("2").setSource("foo", "bar").get(); fail("Indexing request should have failed!"); } catch (DocumentParsingException e) { // general case, the parsing code complains that it can't parse "bar" as a "long" @@ -82,10 +82,10 @@ public void testConflictingDynamicMappings() { public void testConflictingDynamicMappingsBulk() { // we don't use indexRandom because the order of requests is important here createIndex("index"); - client().prepareIndex("index").setId("1").setSource("foo", 3).get(); - BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("index").setId("1").setSource("foo", 3)).get(); + prepareIndex("index").setId("1").setSource("foo", 3).get(); + BulkResponse bulkResponse = client().prepareBulk().add(prepareIndex("index").setId("1").setSource("foo", 3)).get(); assertFalse(bulkResponse.hasFailures()); - bulkResponse = client().prepareBulk().add(client().prepareIndex("index").setId("2").setSource("foo", "bar")).get(); + bulkResponse = 
client().prepareBulk().add(prepareIndex("index").setId("2").setSource("foo", "bar")).get(); assertTrue(bulkResponse.hasFailures()); } @@ -112,7 +112,7 @@ public void run() { startLatch.await(); assertEquals( DocWriteResponse.Result.CREATED, - client().prepareIndex("index").setId(id).setSource("field" + id, "bar").get().getResult() + prepareIndex("index").setId(id).setSource("field" + id, "bar").get().getResult() ); } catch (Exception e) { error.compareAndSet(null, e); @@ -163,7 +163,7 @@ public void testPreflightCheckAvoidsMaster() throws InterruptedException, IOExce XContentType.JSON ) .get(); - client().prepareIndex("index").setId("1").setSource("nested1", Map.of("foo", "bar"), "nested2", Map.of("foo", "bar")).get(); + prepareIndex("index").setId("1").setSource("nested1", Map.of("foo", "bar"), "nested2", Map.of("foo", "bar")).get(); final CountDownLatch masterBlockedLatch = new CountDownLatch(1); final CountDownLatch indexingCompletedLatch = new CountDownLatch(1); @@ -184,9 +184,7 @@ public void onFailure(Exception e) { }); masterBlockedLatch.await(); - final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index") - .setId("2") - .setSource("nested3", Map.of("foo", "bar")); + final IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId("2").setSource("nested3", Map.of("foo", "bar")); try { assertThat( expectThrows(IllegalArgumentException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))).getMessage(), @@ -200,7 +198,7 @@ public void onFailure(Exception e) { public void testTotalFieldsLimitForDynamicMappingsUpdateCheckedAtDocumentParseTime() throws InterruptedException { createIndex("index", Settings.builder().put(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 2).build()); ensureGreen("index"); - client().prepareIndex("index").setId("1").setSource("field1", "value1").get(); + prepareIndex("index").setId("1").setSource("field1", "value1").get(); final CountDownLatch masterBlockedLatch = new CountDownLatch(1); final CountDownLatch indexingCompletedLatch = new CountDownLatch(1); @@ -221,7 +219,7 @@ public void onFailure(Exception e) { }); masterBlockedLatch.await(); - final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index").setId("2").setSource("field2", "value2"); + final IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId("2").setSource("field2", "value2"); try { Exception e = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); assertThat(e.getMessage(), Matchers.containsString("failed to parse")); @@ -265,8 +263,7 @@ public void testTotalFieldsLimitWithRuntimeFields() { { // introduction of a new object with 2 new sub-fields fails - final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index1") - .setId("1") + final IndexRequestBuilder indexRequestBuilder = prepareIndex("index1").setId("1") .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")); Exception exc = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); assertThat(exc.getMessage(), Matchers.containsString("failed to parse")); @@ -279,7 +276,7 @@ public void testTotalFieldsLimitWithRuntimeFields() { { // introduction of a new single field succeeds - client().prepareIndex("index1").setId("2").setSource("field3", "value3", "new_field4", 100).get(); + prepareIndex("index1").setId("2").setSource("field3", "value3", "new_field4", 100).get(); } { @@ -294,8 +291,7 @@ 
public void testTotalFieldsLimitWithRuntimeFields() { """, XContentType.JSON)); // introduction of a new object with 2 new sub-fields succeeds - client().prepareIndex("index1") - .setId("1") + prepareIndex("index1").setId("1") .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")); } } @@ -304,7 +300,7 @@ public void testMappingVersionAfterDynamicMappingUpdate() throws Exception { createIndex("test"); final ClusterService clusterService = internalCluster().clusterService(); final long previousVersion = clusterService.state().metadata().index("test").getMappingVersion(); - client().prepareIndex("test").setId("1").setSource("field", "text").get(); + prepareIndex("test").setId("1").setSource("field", "text").get(); assertBusy(() -> assertThat(clusterService.state().metadata().index("test").getMappingVersion(), equalTo(1 + previousVersion))); } @@ -507,7 +503,7 @@ public void testDynamicRuntimeObjectFields() { Exception exception = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex("test").setSource("obj.runtime", "value").get() + () -> prepareIndex("test").setSource("obj.runtime", "value").get() ); assertThat( exception.getMessage(), @@ -539,8 +535,7 @@ public void testDynamicRuntimeObjectFields() { // target the same shard where we are sure the mapping update has been applied assertEquals( RestStatus.CREATED, - client().prepareIndex("test") - .setSource("obj.runtime.dynamic.number", 1) + prepareIndex("test").setSource("obj.runtime.dynamic.number", 1) .setId("id") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get() @@ -552,7 +547,7 @@ public void testDynamicRuntimeObjectFields() { // a doc with the same field but a different type causes a conflict Exception e = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex("test").setId("id").setSource("obj.runtime.dynamic.number", "string").get() + () -> prepareIndex("test").setId("id").setSource("obj.runtime.dynamic.number", "string").get() ); assertThat( e.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java index 25c33ee66bad4..a22910ab9c4eb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; @@ -46,10 +45,10 @@ public void testMultiFields() throws Exception { assertThat(titleFields.get("not_analyzed"), notNullValue()); assertThat(((Map) titleFields.get("not_analyzed")).get("type").toString(), equalTo("keyword")); - client().prepareIndex("my-index").setId("1").setSource("title", "Multi fields").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("my-index").setId("1").setSource("title", "Multi fields").setRefreshPolicy(IMMEDIATE).get(); - assertHitCount(client().prepareSearch("my-index").setQuery(matchQuery("title", "multi")), 1); - assertHitCount(client().prepareSearch("my-index").setQuery(matchQuery("title.not_analyzed", "Multi fields")), 1); + 
assertHitCount(prepareSearch("my-index").setQuery(matchQuery("title", "multi")), 1); + assertHitCount(prepareSearch("my-index").setQuery(matchQuery("title.not_analyzed", "Multi fields")), 1); assertAcked(indicesAdmin().preparePutMapping("my-index").setSource(createPutMappingSource())); @@ -65,9 +64,9 @@ public void testMultiFields() throws Exception { assertThat(titleFields.get("uncased"), notNullValue()); assertThat(((Map) titleFields.get("uncased")).get("analyzer").toString(), equalTo("whitespace")); - client().prepareIndex("my-index").setId("1").setSource("title", "Multi fields").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("my-index").setId("1").setSource("title", "Multi fields").setRefreshPolicy(IMMEDIATE).get(); - assertHitCount(client().prepareSearch("my-index").setQuery(matchQuery("title.uncased", "Multi")), 1); + assertHitCount(prepareSearch("my-index").setQuery(matchQuery("title.uncased", "Multi")), 1); } @SuppressWarnings("unchecked") @@ -89,13 +88,13 @@ public void testGeoPointMultiField() throws Exception { assertThat(bField.get("type").toString(), equalTo("keyword")); GeoPoint point = new GeoPoint(51, 19); - client().prepareIndex("my-index").setId("1").setSource("a", point.toString()).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse countResponse = prepareSearch("my-index").setSize(0) - .setQuery(constantScoreQuery(geoDistanceQuery("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS))) - .get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(1L)); - countResponse = prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash())).get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(1L)); + prepareIndex("my-index").setId("1").setSource("a", point.toString()).setRefreshPolicy(IMMEDIATE).get(); + assertHitCount( + prepareSearch("my-index").setSize(0) + .setQuery(constantScoreQuery(geoDistanceQuery("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS))), + 1L + ); + assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash())), 1L); } @SuppressWarnings("unchecked") @@ -115,9 +114,8 @@ public void testCompletionMultiField() throws Exception { assertThat(bField.size(), equalTo(1)); assertThat(bField.get("type").toString(), equalTo("keyword")); - client().prepareIndex("my-index").setId("1").setSource("a", "complete me").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse countResponse = prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "complete me")).get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(1L)); + prepareIndex("my-index").setId("1").setSource("a", "complete me").setRefreshPolicy(IMMEDIATE).get(); + assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "complete me")), 1L); } @SuppressWarnings("unchecked") @@ -137,9 +135,8 @@ public void testIpMultiField() throws Exception { assertThat(bField.size(), equalTo(1)); assertThat(bField.get("type").toString(), equalTo("keyword")); - client().prepareIndex("my-index").setId("1").setSource("a", "127.0.0.1").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse countResponse = prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "127.0.0.1")).get(); - assertThat(countResponse.getHits().getTotalHits().value, equalTo(1L)); + prepareIndex("my-index").setId("1").setSource("a", "127.0.0.1").setRefreshPolicy(IMMEDIATE).get(); + assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", "127.0.0.1")), 1L); } private XContentBuilder 
createMappingSource(String fieldType) throws IOException { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index bb20ddd321d7c..66a35328954e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -32,7 +32,7 @@ public void setUp() throws Exception { super.setUp(); createIndex("test"); ensureGreen(); - client().prepareIndex("index").setId("1").setSource("field", "value").get(); + prepareIndex("index").setId("1").setSource("field", "value").get(); refresh(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java index 7751d5e7783b9..70983e5abfb96 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java @@ -55,8 +55,8 @@ public void testZeroTermsQuery() throws ExecutionException, InterruptedException private List getIndexRequests() { List requests = new ArrayList<>(); - requests.add(client().prepareIndex(INDEX).setSource("name", "the beatles")); - requests.add(client().prepareIndex(INDEX).setSource("name", "led zeppelin")); + requests.add(prepareIndex(INDEX).setSource("name", "the beatles")); + requests.add(prepareIndex(INDEX).setSource("name", "led zeppelin")); return requests; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index d1122004ccce2..9c4473297ba7b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -57,7 +57,7 @@ public void testGlobalCheckpointSyncWithAsyncDurability() throws Exception { for (int j = 0; j < 10; j++) { final String id = Integer.toString(j); - client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); } assertBusy(() -> { @@ -157,7 +157,7 @@ private void runGlobalCheckpointSyncTest( } for (int j = 0; j < numberOfDocuments; j++) { final String id = Integer.toString(index * numberOfDocuments + j); - client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); } try { barrier.await(); @@ -223,7 +223,7 @@ public void testPersistGlobalCheckpoint() throws Exception { } int numDocs = randomIntBetween(1, 20); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } ensureGreen("test"); assertBusy(() -> { @@ -252,7 +252,7 @@ public void testPersistLocalCheckpoint() { logger.info("numDocs {}", numDocs); long maxSeqNo = 0; for (int i = 0; i < numDocs; i++) { - maxSeqNo = 
client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get().getSeqNo(); + maxSeqNo = prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get().getSeqNo(); logger.info("got {}", maxSeqNo); } for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java index 70adc75574437..b38198a98b5a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java @@ -63,7 +63,7 @@ public void accept(final long g, final Exception e) { } }, null); - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); assertBusy(() -> assertThat(globalCheckpoint.get(), equalTo((long) index))); // adding a listener expecting a lower global checkpoint should fire immediately final AtomicLong immediateGlobalCheckpint = new AtomicLong(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index f473015f864db..52bb5159c9b7d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -146,7 +146,7 @@ public void testLockTryingToDelete() throws Exception { public void testDurableFlagHasEffect() { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); @@ -169,7 +169,7 @@ public void testDurableFlagHasEffect() { setDurability(shard, Translog.Durability.REQUEST); assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); assertTrue(needsSync.test(translog)); setDurability(shard, Translog.Durability.REQUEST); client().prepareDelete("test", "1").get(); @@ -181,7 +181,7 @@ public void testDurableFlagHasEffect() { setDurability(shard, Translog.Durability.REQUEST); assertNoFailures( client().prepareBulk() - .add(client().prepareIndex("test").setId("3").setSource("{}", XContentType.JSON)) + .add(prepareIndex("test").setId("3").setSource("{}", XContentType.JSON)) .add(client().prepareDelete("test", "1")) .get() ); @@ -190,7 +190,7 @@ public void testDurableFlagHasEffect() { setDurability(shard, Translog.Durability.ASYNC); assertNoFailures( client().prepareBulk() - .add(client().prepareIndex("test").setId("4").setSource("{}", XContentType.JSON)) + .add(prepareIndex("test").setId("4").setSource("{}", XContentType.JSON)) .add(client().prepareDelete("test", "3")) .get() ); @@ -220,7 +220,7 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { Settings 
idxSettings = Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, idxPath).build(); createIndex("test", idxSettings); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); assertHitCount(client().prepareSearch("test"), 1L); indicesAdmin().prepareDelete("test").get(); assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); @@ -230,7 +230,7 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { public void testExpectedShardSizeIsPresent() throws InterruptedException { assertAcked(indicesAdmin().prepareCreate("test").setSettings(indexSettings(1, 0))); for (int i = 0; i < 50; i++) { - client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setSource("{}", XContentType.JSON).get(); } ensureGreen("test"); InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class); @@ -253,7 +253,7 @@ public void testIndexCanChangeCustomDataPath() throws Exception { logger.info("--> creating index [{}] with data_path [{}]", index, indexDataPath); createIndex(index, Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()).build()); - client().prepareIndex(index).setId("1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(index).setId("1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(index); assertHitCount(client().prepareSearch(index).setSize(0), 1L); @@ -328,11 +328,7 @@ public void testMaybeFlush() throws Exception { .build() ) .get(); - client().prepareIndex("test") - .setId("0") - .setSource("{}", XContentType.JSON) - .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) - .get(); + prepareIndex("test").setId("0").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertFalse(shard.shouldPeriodicallyFlush()); shard.applyIndexOperationOnPrimary( Versions.MATCH_ANY, @@ -347,11 +343,7 @@ public void testMaybeFlush() throws Exception { final Translog translog = getTranslog(shard); assertEquals(2, translog.stats().getUncommittedOperations()); assertThat(shard.flushStats().getTotal(), equalTo(0L)); - client().prepareIndex("test") - .setId("2") - .setSource("{}", XContentType.JSON) - .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) - .get(); + prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(2L)); assertBusy(() -> { // this is async assertFalse(shard.shouldPeriodicallyFlush()); @@ -454,11 +446,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { settings = Settings.builder().put("index.translog.generation_threshold_size", "117b").build(); } indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get(); - client().prepareIndex("test") - .setId("0") - .setSource("{}", XContentType.JSON) - .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) - .get(); + prepareIndex("test").setId("0").setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); assertFalse(shard.shouldPeriodicallyFlush()); final AtomicBoolean running = new AtomicBoolean(true); final int numThreads = randomIntBetween(2, 4); @@ -481,7 +469,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { final CheckedRunnable check; if (flush) { final FlushStats initialStats = shard.flushStats(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); check = () -> { assertFalse(shard.shouldPeriodicallyFlush()); final FlushStats currentStats = shard.flushStats(); @@ -506,7 +494,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { }; } else { final long generation = getTranslog(shard).currentFileGeneration(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); check = () -> { assertFalse(shard.shouldRollTranslogGeneration()); assertEquals(generation + 1, getTranslog(shard).currentFileGeneration()); @@ -527,7 +515,7 @@ public void testFlushStats() throws Exception { indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get(); final int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); } // A flush stats may include the new total count but the old period count - assert eventually. assertBusy(() -> { @@ -538,7 +526,7 @@ public void testFlushStats() throws Exception { settings = Settings.builder().put("index.translog.flush_threshold_size", (String) null).build(); indicesAdmin().prepareUpdateSettings("test").setSettings(settings).get(); - client().prepareIndex("test").setId(UUIDs.randomBase64UUID()).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(UUIDs.randomBase64UUID()).setSource("{}", XContentType.JSON).get(); indicesAdmin().prepareFlush("test").setForce(randomBoolean()).setWaitIfOngoing(true).get(); final FlushStats flushStats = indicesAdmin().prepareStats("test").clear().setFlush(true).get().getTotal().flush; assertThat(flushStats.getTotal(), greaterThan(flushStats.getPeriodic())); @@ -550,9 +538,9 @@ public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); client().prepareDelete("test", "0").get(); - client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); CheckedFunction wrapper = directoryReader -> directoryReader; shard.close("simon says", false); @@ -666,7 +654,7 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti final SearchRequest countRequest = new SearchRequest("test").source(new SearchSourceBuilder().size(0)); final long numDocs = between(10, 20); for (int i = 0; i < numDocs; i++) { - 
client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); if (randomBoolean()) { shard.refresh("test"); } @@ -688,7 +676,7 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti final long moreDocs = between(10, 20); for (int i = 0; i < moreDocs; i++) { - client().prepareIndex("test").setId(Long.toString(i + numDocs)).setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId(Long.toString(i + numDocs)).setSource("{}", XContentType.JSON).get(); if (randomBoolean()) { shard.refresh("test"); } @@ -712,7 +700,7 @@ public void testShardChangesWithDefaultDocType() throws Exception { int numOps = between(1, 10); for (int i = 0; i < numOps; i++) { if (randomBoolean()) { - client().prepareIndex("index").setId(randomFrom("1", "2")).setSource("{}", XContentType.JSON).get(); + prepareIndex("index").setId(randomFrom("1", "2")).setSource("{}", XContentType.JSON).get(); } else { client().prepareDelete("index", randomFrom("1", "2")).get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index d57cbe50074ac..6c691c0a14440 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -119,7 +119,7 @@ public void testCorruptIndex() throws Exception { final int numExtraDocs = between(10, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numExtraDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } numDocs += numExtraDocs; @@ -282,7 +282,7 @@ public void testCorruptTranslogTruncation() throws Exception { logger.info("--> indexing [{}] docs to be kept", numDocsToKeep); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); flush(indexName); @@ -293,7 +293,7 @@ public void testCorruptTranslogTruncation() throws Exception { logger.info("--> indexing [{}] more doc to be truncated", numDocsToTruncate); builders = new IndexRequestBuilder[numDocsToTruncate]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); @@ -472,7 +472,7 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { logger.info("--> indexing [{}] docs to be kept", numDocsToKeep); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); flush(indexName); @@ -482,7 +482,7 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { logger.info("--> indexing 
[{}] more docs to be truncated", numDocsToTruncate); builders = new IndexRequestBuilder[numDocsToTruncate]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex(indexName).setSource("foo", "bar"); + builders[i] = prepareIndex(indexName).setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); final int totalDocs = numDocsToKeep + numDocsToTruncate; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java index 22bb5974ad550..e1ab2bdc2369e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -43,6 +42,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; public class SearchIdleIT extends ESSingleNodeTestCase { @@ -92,10 +92,10 @@ private void runTestAutomaticRefresh(final IntToLongFunction count) throws Inter int numDocs = scaledRandomIntBetween(25, 100); totalNumDocs.set(numDocs); CountDownLatch indexingDone = new CountDownLatch(numDocs); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); indexingDone.countDown(); // one doc is indexed above blocking IndexShard shard = indexService.getShard(0); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); shard.scheduledRefresh(future); boolean hasRefreshed = future.actionGet(); if (randomTimeValue == TimeValue.ZERO) { @@ -125,7 +125,7 @@ private void runTestAutomaticRefresh(final IntToLongFunction count) throws Inter started.await(); assertThat(count.applyAsLong(totalNumDocs.get()), equalTo(1L)); for (int i = 1; i < numDocs; i++) { - client().prepareIndex("test").setId("" + i).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).execute(new ActionListener<>() { + prepareIndex("test").setId("" + i).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).execute(new ActionListener<>() { @Override public void onResponse(DocWriteResponse indexResponse) { indexingDone.countDown(); @@ -154,7 +154,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { IndexService indexService = createIndex("test", builder.build()); assertFalse(indexService.getIndexSettings().isExplicitRefresh()); ensureGreen(); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); IndexShard shard = indexService.getShard(0); scheduleRefresh(shard, false); assertTrue(shard.isSearchIdle()); @@ -162,7 +162,7 @@ public void testPendingRefreshWithIntervalChange() 
throws Exception { // async on purpose to make sure it happens concurrently indicesAdmin().prepareRefresh().execute(ActionListener.running(refreshLatch::countDown)); assertHitCount(client().prepareSearch(), 1); - client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); scheduleRefresh(shard, false); assertTrue(shard.hasRefreshPending()); @@ -179,7 +179,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { // We need to ensure a `scheduledRefresh` triggered by the internal refresh setting update is executed before we index a new doc; // otherwise, it will compete to call `Engine#maybeRefresh` with the `scheduledRefresh` that we are going to verify. ensureNoPendingScheduledRefresh(indexService.getThreadPool()); - client().prepareIndex("test").setId("2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); scheduleRefresh(shard, true); assertFalse(shard.hasRefreshPending()); assertTrue(shard.isSearchIdle()); @@ -193,7 +193,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { } private static void scheduleRefresh(IndexShard shard, boolean expectRefresh) { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); shard.scheduledRefresh(future); assertThat(future.actionGet(), equalTo(expectRefresh)); } @@ -279,15 +279,13 @@ public void testSearchIdleBoolQueryMatchOneIndex() throws InterruptedException { assertEquals( RestStatus.CREATED, - client().prepareIndex(idleIndex) - .setSource("keyword", "idle", "@timestamp", "2021-05-10T19:00:03.765Z", "routing_field", "aaa") + prepareIndex(idleIndex).setSource("keyword", "idle", "@timestamp", "2021-05-10T19:00:03.765Z", "routing_field", "aaa") .get() .status() ); assertEquals( RestStatus.CREATED, - client().prepareIndex(activeIndex) - .setSource("keyword", "active", "@timestamp", "2021-05-12T20:07:12.112Z", "routing_field", "aaa") + prepareIndex(activeIndex).setSource("keyword", "active", "@timestamp", "2021-05-12T20:07:12.112Z", "routing_field", "aaa") .get() .status() ); @@ -306,18 +304,20 @@ public void testSearchIdleBoolQueryMatchOneIndex() throws InterruptedException { assertIdleShard(activeIndexStatsBefore); // WHEN - final SearchResponse searchResponse = client().prepareSearch("test*") - .setQuery(new RangeQueryBuilder("@timestamp").from("2021-05-12T20:00:00.000Z").to("2021-05-12T21:00:00.000Z")) - .setPreFilterShardSize(5) - .get(); - - // THEN - assertEquals(RestStatus.OK, searchResponse.status()); - assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getFailedShards()); - Arrays.stream(searchResponse.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); - // NOTE: we need an empty result from at least one shard - assertEquals(1, searchResponse.getHits().getHits().length); + assertResponse( + client().prepareSearch("test*") + .setQuery(new RangeQueryBuilder("@timestamp").from("2021-05-12T20:00:00.000Z").to("2021-05-12T21:00:00.000Z")) + .setPreFilterShardSize(5), + response -> { + // THEN + assertEquals(RestStatus.OK, response.status()); + assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, response.getSkippedShards()); + assertEquals(0, response.getFailedShards()); + 
Arrays.stream(response.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); + // NOTE: we need an empty result from at least one shard + assertEquals(1, response.getHits().getHits().length); + } + ); final IndicesStatsResponse idleIndexStatsAfter = indicesAdmin().prepareStats(idleIndex).get(); assertIdleShardsRefreshStats(idleIndexStatsBefore, idleIndexStatsAfter); } @@ -351,11 +351,8 @@ public void testSearchIdleExistsQueryMatchOneIndex() throws InterruptedException "type=keyword" ); - assertEquals(RestStatus.CREATED, client().prepareIndex(idleIndex).setSource("keyword", "idle").get().status()); - assertEquals( - RestStatus.CREATED, - client().prepareIndex(activeIndex).setSource("keyword", "active", "unmapped", "bbb").get().status() - ); + assertEquals(RestStatus.CREATED, prepareIndex(idleIndex).setSource("keyword", "idle").get().status()); + assertEquals(RestStatus.CREATED, prepareIndex(activeIndex).setSource("keyword", "active", "unmapped", "bbb").get().status()); assertEquals(RestStatus.OK, indicesAdmin().prepareRefresh(idleIndex, activeIndex).get().getStatus()); waitUntil( @@ -371,18 +368,15 @@ public void testSearchIdleExistsQueryMatchOneIndex() throws InterruptedException assertIdleShard(activeIndexStatsBefore); // WHEN - final SearchResponse searchResponse = client().prepareSearch("test*") - .setQuery(new ExistsQueryBuilder("unmapped")) - .setPreFilterShardSize(5) - .get(); - - // THEN - assertEquals(RestStatus.OK, searchResponse.status()); - assertEquals(idleIndexShardsCount, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getFailedShards()); - Arrays.stream(searchResponse.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); - // NOTE: we need an empty result from at least one shard - assertEquals(1, searchResponse.getHits().getHits().length); + assertResponse(client().prepareSearch("test*").setQuery(new ExistsQueryBuilder("unmapped")).setPreFilterShardSize(5), response -> { + // THEN + assertEquals(RestStatus.OK, response.status()); + assertEquals(idleIndexShardsCount, response.getSkippedShards()); + assertEquals(0, response.getFailedShards()); + Arrays.stream(response.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); + // NOTE: we need an empty result from at least one shard + assertEquals(1, response.getHits().getHits().length); + }); final IndicesStatsResponse idleIndexStatsAfter = indicesAdmin().prepareStats(idleIndex).get(); assertIdleShardsRefreshStats(idleIndexStatsBefore, idleIndexStatsAfter); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 8de218f8a29c8..ec79b53ccd174 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -158,7 +158,7 @@ public void testCorruptFileAndRecover() throws InterruptedException, IOException disableAllocation("test"); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); @@ -269,7 +269,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted ensureGreen(); 
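The recurring refactor in the hunks above swaps `SearchResponse searchResponse = builder.get()` plus inline assertions for the callback-style `assertResponse(builder, response -> { ... })` imported from org.elasticsearch.test.hamcrest.ElasticsearchAssertions. A minimal sketch of the helper's shape, assuming only the behaviour visible in this patch (the real implementation lives in ElasticsearchAssertions; the point is that the response is handed to the consumer and then always released, so a test can no longer leak it):

    // Sketch only, not part of this patch: run the request, let the test
    // assert against the response, and release the response afterwards.
    public static <Q extends ActionRequest, R extends ActionResponse> void assertResponse(
        ActionRequestBuilder<Q, R> builder,
        Consumer<R> consumer
    ) {
        R response = builder.get();
        try {
            consumer.accept(response);
        } finally {
            response.decRef(); // assumes R is ref-counted, as SearchResponse is on this branch
        }
    }

assertNoFailuresAndResponse(...), used in the same hunks, composes this pattern with a shard-failure check before the consumer runs.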
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); @@ -402,11 +402,11 @@ public void testCorruptionOnNetworkLayer() throws InterruptedException { ensureGreen(); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).execute().actionGet()); + assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog assertHitCount(prepareSearch().setSize(0), numDocs); @@ -541,11 +541,11 @@ public void testCorruptFileThenSnapshotAndRestore() throws InterruptedException, ensureGreen(); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).execute().actionGet()); + assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog assertHitCount(prepareSearch().setSize(0), numDocs); @@ -608,11 +608,11 @@ public void testReplicaCorruption() throws Exception { ensureGreen(); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("field", "value"); + builders[i] = prepareIndex("test").setSource("field", "value"); } indexRandom(true, builders); ensureGreen(); - assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).execute().actionGet()); + assertAllSuccessful(indicesAdmin().prepareFlush().setForce(true).get()); // we have to flush at least once here since we don't corrupt the translog assertHitCount(prepareSearch().setSize(0), numDocs); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index b8ecbc2e750af..d8d9ef47d4451 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -59,7 +59,7 @@ public void testCorruptTranslogFiles() throws Exception { IndexRequestBuilder[] builders = new IndexRequestBuilder[scaledRandomIntBetween(100, 1000)]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setSource("foo", "bar"); + builders[i] = prepareIndex("test").setSource("foo", "bar"); } indexRandom(false, false, false, Arrays.asList(builders)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java index 19efcd9e3f31f..423e5c14c472a 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/ExceptionRetryIT.java
@@ -17,7 +17,6 @@
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.bulk.TransportShardBulkAction;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.plugins.Plugin;
@@ -39,7 +38,8 @@
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -94,9 +94,9 @@ public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, I
             bulkBuilder.add(client.prepareIndex("index").setSource(doc));
         }

-        BulkResponse response = bulkBuilder.get();
-        if (response.hasFailures()) {
-            for (BulkItemResponse singleIndexRespons : response.getItems()) {
+        BulkResponse bulkResponse = bulkBuilder.get();
+        if (bulkResponse.hasFailures()) {
+            for (BulkItemResponse singleIndexRespons : bulkResponse.getItems()) {
                 if (singleIndexRespons.isFailed()) {
                     fail("None of the bulk items should fail but got " + singleIndexRespons.getFailureMessage());
                 }
@@ -104,41 +104,42 @@ public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, I
         }
         refresh();

-        SearchResponse searchResponse = prepareSearch("index").setSize(numDocs * 2).addStoredField("_id").get();
-
-        Set<String> uniqueIds = new HashSet<>();
-        long dupCounter = 0;
-        boolean found_duplicate_already = false;
-        for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
-            if (uniqueIds.add(searchResponse.getHits().getHits()[i].getId()) == false) {
-                if (found_duplicate_already == false) {
-                    SearchResponse dupIdResponse = prepareSearch("index").setQuery(
-                        termQuery("_id", searchResponse.getHits().getHits()[i].getId())
-                    ).setExplain(true).get();
-                    assertThat(dupIdResponse.getHits().getTotalHits().value, greaterThan(1L));
-                    logger.info("found a duplicate id:");
-                    for (SearchHit hit : dupIdResponse.getHits()) {
-                        logger.info("Doc {} was found on shard {}", hit.getId(), hit.getShard().getShardId());
+        assertNoFailuresAndResponse(prepareSearch("index").setSize(numDocs * 2).addStoredField("_id"), response -> {
+            Set<String> uniqueIds = new HashSet<>();
+            long dupCounter = 0;
+            boolean found_duplicate_already = false;
+            for (int i = 0; i < response.getHits().getHits().length; i++) {
+                if (uniqueIds.add(response.getHits().getHits()[i].getId()) == false) {
+                    if (found_duplicate_already == false) {
+                        assertResponse(
+                            prepareSearch("index").setQuery(termQuery("_id", response.getHits().getHits()[i].getId())).setExplain(true),
+                            dupIdResponse -> {
+                                assertThat(dupIdResponse.getHits().getTotalHits().value, greaterThan(1L));
+                                logger.info("found a duplicate id:");
+                                for (SearchHit hit : dupIdResponse.getHits()) {
+                                    logger.info("Doc {} was found on shard {}", hit.getId(), hit.getShard().getShardId());
+                                }
+                                logger.info("will not print anymore in case more duplicates are found.");
+                            }
+                        );
+                        found_duplicate_already = true;
                     }
-                    logger.info("will not print anymore in case more duplicates are found.");
-                    found_duplicate_already = true;
+                    dupCounter++;
                 }
-                dupCounter++;
             }
-        }
-        assertNoFailures(searchResponse);
-        assertThat(dupCounter, equalTo(0L));
-        assertHitCount(searchResponse, numDocs);
-        IndicesStatsResponse index = indicesAdmin().prepareStats("index").clear().setSegments(true).get();
-        IndexStats indexStats = index.getIndex("index");
-        long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
-        for (IndexShardStats indexShardStats : indexStats) {
-            for (ShardStats shardStats : indexShardStats) {
-                SegmentsStats segments = shardStats.getStats().getSegments();
-                maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp());
+            assertThat(dupCounter, equalTo(0L));
+            assertHitCount(response, numDocs);
+            IndicesStatsResponse index = indicesAdmin().prepareStats("index").clear().setSegments(true).get();
+            IndexStats indexStats = index.getIndex("index");
+            long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
+            for (IndexShardStats indexShardStats : indexStats) {
+                for (ShardStats shardStats : indexShardStats) {
+                    SegmentsStats segments = shardStats.getStats().getSegments();
+                    maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp());
+                }
             }
-        }
-        assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get());
-        assertTrue("maxUnsafeAutoIdTimestamp must be > than 0 we have at least one retry", maxUnsafeAutoIdTimestamp > -1);
+            assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get());
+            assertTrue("maxUnsafeAutoIdTimestamp must be > than 0 we have at least one retry", maxUnsafeAutoIdTimestamp > -1);
+        });
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java
index 95846fcb55594..143ffedeefc55 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
 import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.GroupShardsIterator;
 import org.elasticsearch.cluster.routing.ShardIterator;
@@ -28,6 +27,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -42,7 +42,7 @@ protected int numberOfReplicas() {

     public void testSimpleStats() throws Exception {
         // clear all stats first
-        indicesAdmin().prepareStats().clear().execute().actionGet();
+        indicesAdmin().prepareStats().clear().get();

         final int numNodes = cluster().numDataNodes();
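Two further mechanical cleanups repeat through these files: `client().prepareIndex("idx")` becomes a bare `prepareIndex("idx")`, and `.execute().actionGet()` becomes `.get()`; on ActionRequestBuilder, get() is the long-standing shorthand for execute().actionGet(), as in the prepareStats().clear().get() line just above. The bare prepareIndex call is presumably a small convenience on the shared test base class along these lines, sketched here rather than quoted from the framework:

    // Sketch of the assumed base-class helper: drops the client() call at
    // every index-request call site in these tests.
    protected final IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index);
    }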
assertThat(numNodes, greaterThanOrEqualTo(2)); final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard... @@ -66,20 +66,26 @@ public void testSimpleStats() throws Exception { long startTime = System.currentTimeMillis(); for (int i = 0; i < suggestAllIdx; i++) { - SearchResponse suggestResponse = addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch(), i).get(); - assertAllSuccessful(suggestResponse); + assertResponse( + addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch(), i), + response -> assertAllSuccessful(response) + ); } for (int i = 0; i < suggestIdx1; i++) { - SearchResponse suggestResponse = addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch("test1"), i).get(); - assertAllSuccessful(suggestResponse); + assertResponse( + addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch("test1"), i), + response -> assertAllSuccessful(response) + ); } for (int i = 0; i < suggestIdx2; i++) { - SearchResponse suggestResponse = addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch("test2"), i).get(); - assertAllSuccessful(suggestResponse); + assertResponse( + addSuggestions(internalCluster().coordOnlyNodeClient().prepareSearch("test2"), i), + response -> assertAllSuccessful(response) + ); } long endTime = System.currentTimeMillis(); - IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().execute().actionGet(); + IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().get(); final SearchStats.Stats suggest = indicesStats.getTotal().getSearch().getTotal(); // check current @@ -105,7 +111,7 @@ public void testSimpleStats() throws Exception { // the upperbound is num shards * total time since we do searches in parallel assertThat(suggest.getSuggestTimeInMillis(), lessThanOrEqualTo(totalShards * (endTime - startTime))); - NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().execute().actionGet(); + NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get(); Set nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2"); int num = 0; for (NodeStats stat : nodeStats.getNodes()) { @@ -138,7 +144,7 @@ private SearchRequestBuilder addSuggestions(SearchRequestBuilder request, int i) } private Set nodeIdsWithIndex(String... indices) { - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); Set nodes = new HashSet<>(); for (ShardIterator shardIterator : allAssignedShardsGrouped) { @@ -153,7 +159,7 @@ private Set nodeIdsWithIndex(String... indices) { } protected int numAssignedShards(String... 
indices) { - ClusterState state = clusterAdmin().prepareState().execute().actionGet().getState(); + ClusterState state = clusterAdmin().prepareState().get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); return allAssignedShardsGrouped.size(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java index e3c66f3dabfdf..5f1b1ab81b9da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.DocumentParsingException; @@ -28,6 +27,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicIntegerArray; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -47,7 +47,7 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { logger.info("indexing [{}] docs", numOfDocs); List builders = new ArrayList<>(numOfDocs); for (int j = 0; j < numOfDocs; j++) { - builders.add(client().prepareIndex("test").setSource("field", "value_" + j)); + builders.add(prepareIndex("test").setSource("field", "value_" + j)); } indexRandom(true, builders); logger.info("verifying indexed content"); @@ -55,17 +55,18 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { for (int j = 0; j < numOfChecks; j++) { try { logger.debug("running search with all types"); - SearchResponse response = prepareSearch("test").get(); - if (response.getHits().getTotalHits().value != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. search response: \n{}", message, response); - fail(message); - } + assertResponse(prepareSearch("test"), response -> { + if (response.getHits().getTotalHits().value != numOfDocs) { + final String message = "Count is " + + response.getHits().getTotalHits().value + + " but " + + numOfDocs + + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. search response: \n{}", message, response); + fail(message); + } + }); } catch (Exception e) { logger.error("search for all docs types failed", e); if (firstError == null) { @@ -74,17 +75,18 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { } try { logger.debug("running search with a specific type"); - SearchResponse response = prepareSearch("test").get(); - if (response.getHits().getTotalHits().value != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. 
search response: \n{}", message, response); - fail(message); - } + assertResponse(prepareSearch("test"), response -> { + if (response.getHits().getTotalHits().value != numOfDocs) { + final String message = "Count is " + + response.getHits().getTotalHits().value + + " but " + + numOfDocs + + " was expected. " + + ElasticsearchAssertions.formatShardStatus(response); + logger.error("{}. search response: \n{}", message, response); + fail(message); + } + }); } catch (Exception e) { logger.error("search for all docs of a specific type failed", e); if (firstError == null) { @@ -103,15 +105,15 @@ public void testCreatedFlag() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_1").execute().actionGet(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").execute().actionGet(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").get(); assertEquals(DocWriteResponse.Result.UPDATED, indexResponse.getResult()); - client().prepareDelete("test", "1").execute().actionGet(); + client().prepareDelete("test", "1").get(); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").execute().actionGet(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); } @@ -120,14 +122,14 @@ public void testCreatedFlagWithFlush() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_1").execute().actionGet(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - client().prepareDelete("test", "1").execute().actionGet(); + client().prepareDelete("test", "1").get(); flush(); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").execute().actionGet(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); } @@ -169,13 +171,11 @@ public void testCreatedFlagWithExternalVersioning() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(123) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); } @@ -183,10 +183,7 @@ public void testCreateFlagWithBulk() { createIndex("test"); ensureGreen(); - BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex("test").setId("1").setSource("field1", "value1_1")) - .execute() - .actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(prepareIndex("test").setId("1").setSource("field1", "value1_1")).get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); assertThat(bulkResponse.getItems().length, equalTo(1)); IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse(); @@ -208,7 +205,7 @@ public 
void testCreateIndexWithLongName() { } try { - client().prepareIndex(randomAlphaOfLengthBetween(min, max).toLowerCase(Locale.ROOT)).setSource("foo", "bar").get(); + prepareIndex(randomAlphaOfLengthBetween(min, max).toLowerCase(Locale.ROOT)).setSource("foo", "bar").get(); fail("exception should have been thrown on too-long index name"); } catch (InvalidIndexNameException e) { assertThat( @@ -220,7 +217,7 @@ public void testCreateIndexWithLongName() { try { // Catch chars that are more than a single byte - client().prepareIndex( + prepareIndex( randomAlphaOfLength(MetadataCreateIndexService.MAX_INDEX_NAME_BYTES - 1).toLowerCase(Locale.ROOT) + "Ϟ".toLowerCase( Locale.ROOT ) @@ -263,9 +260,7 @@ public void testInvalidIndexName() { } public void testDocumentWithBlankFieldName() { - Exception e = expectThrows(DocumentParsingException.class, () -> { - client().prepareIndex("test").setId("1").setSource("", "value1_2").execute().actionGet(); - }); + Exception e = expectThrows(DocumentParsingException.class, () -> prepareIndex("test").setId("1").setSource("", "value1_2").get()); assertThat(e.getMessage(), containsString("failed to parse")); assertThat(e.getCause().getMessage(), containsString("field name cannot be an empty string")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java index 9cef9becd6aaf..28a5ad9c29126 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java @@ -79,7 +79,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { final String node2 = getLocalNodeId(server_2); // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join) - clusterAdmin().prepareReroute().execute().actionGet(); + clusterAdmin().prepareReroute().get(); clusterHealth = clusterAdmin().health( new ClusterHealthRequest(new String[] {}).waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true) @@ -120,7 +120,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { final String node3 = getLocalNodeId(server_3); // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join) - clusterAdmin().prepareReroute().execute().actionGet(); + clusterAdmin().prepareReroute().get(); clusterHealth = clusterAdmin().prepareHealth() .setWaitForGreenStatus() @@ -203,7 +203,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { logger.info("Deleting index [test]"); // last, lets delete the index - AcknowledgedResponse deleteIndexResponse = indicesAdmin().prepareDelete("test").execute().actionGet(); + AcknowledgedResponse deleteIndexResponse = indicesAdmin().prepareDelete("test").get(); assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true)); clusterState = clusterAdmin().prepareState().get().getState(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java index 211e34c99ec23..67e8d2fd75d65 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java @@ -12,12 +12,6 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.index.IndexNotFoundException; @@ -29,8 +23,10 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.Locale; +import java.util.function.Consumer; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -51,21 +47,25 @@ public void setNow() { * of failing when index resolution with `now` is one day off, this method wraps calls with the assumption that * the day did not change during the test run. */ - public <Q extends ActionRequest, R extends ActionResponse> R dateSensitiveGet(ActionRequestBuilder<Q, R> builder) { + public <Q extends ActionRequest, R extends ActionResponse> void dateSensitiveGet( + ActionRequestBuilder<Q, R> builder, + Consumer<R> consumer + ) { Runnable dayChangeAssumption = () -> assumeTrue( "day changed between requests", ZonedDateTime.now(ZoneOffset.UTC).getDayOfYear() == now.getDayOfYear() ); - R response; try { - response = builder.get(); + assertResponse(builder, response -> { + dayChangeAssumption.run(); + consumer.accept(response); + }); } catch (IndexNotFoundException e) { // index resolver throws this if it does not find the exact index due to day changes dayChangeAssumption.run(); throw e; } - dayChangeAssumption.run(); - return response; + } public void testIndexNameDateMathExpressions() { @@ -74,61 +74,69 @@ public void testIndexNameDateMathExpressions() { String index3 = ".marvel-" + DateTimeFormatter.ofPattern("yyyy.MM.dd", Locale.ROOT).format(now.minusDays(2)); createIndex(index1, index2, index3); - GetSettingsResponse getSettingsResponse = dateSensitiveGet(indicesAdmin().prepareGetSettings(index1, index2, index3)); - assertEquals(index1, getSettingsResponse.getSetting(index1, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); - assertEquals(index2, getSettingsResponse.getSetting(index2, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); - assertEquals(index3, getSettingsResponse.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + dateSensitiveGet(indicesAdmin().prepareGetSettings(index1, index2, index3), response -> { + assertEquals(index1, response.getSetting(index1, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(index2, response.getSetting(index2, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(index3, response.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + }); String dateMathExp1 = "<.marvel-{now/d}>"; String dateMathExp2 = "<.marvel-{now/d-1d}>"; String dateMathExp3 = "<.marvel-{now/d-2d}>"; - client().prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); -
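The `dateSensitiveGet` rewrite above is the PR's recurring pattern in miniature: a helper that used to return the response now accepts a `Consumer`, letting `assertResponse` own the response lifecycle while the day-change assumption still runs inside the callback. The skeleton of that shape, with generic parameter names as assumptions:

```java
// Sketch only: the consumer-based helper shape; Q and R are assumed parameter names.
import java.util.function.Consumer;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;

public class ConsumerHelperSketch {
    // before: R response = helper(builder); assertSomething(response);
    // after:  helper(builder, response -> assertSomething(response));
    static <Q extends ActionRequest, R extends ActionResponse> void helper(ActionRequestBuilder<Q, R> builder, Consumer<R> consumer) {
        assertResponse(builder, consumer); // assertResponse releases the response after the consumer runs
    }
}
```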
client().prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); refresh(); - SearchResponse searchResponse = dateSensitiveGet(prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3)); - assertHitCount(searchResponse, 3); - assertSearchHits(searchResponse, "1", "2", "3"); - - GetResponse getResponse = dateSensitiveGet(client().prepareGet(dateMathExp1, "1")); - assertThat(getResponse.isExists(), is(true)); - assertThat(getResponse.getId(), equalTo("1")); - - getResponse = dateSensitiveGet(client().prepareGet(dateMathExp2, "2")); - assertThat(getResponse.isExists(), is(true)); - assertThat(getResponse.getId(), equalTo("2")); - - getResponse = dateSensitiveGet(client().prepareGet(dateMathExp3, "3")); - assertThat(getResponse.isExists(), is(true)); - assertThat(getResponse.getId(), equalTo("3")); - - MultiGetResponse mgetResponse = dateSensitiveGet( - client().prepareMultiGet().add(dateMathExp1, "1").add(dateMathExp2, "2").add(dateMathExp3, "3") - ); - assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true)); - assertThat(mgetResponse.getResponses()[0].getResponse().getId(), equalTo("1")); - assertThat(mgetResponse.getResponses()[1].getResponse().isExists(), is(true)); - assertThat(mgetResponse.getResponses()[1].getResponse().getId(), equalTo("2")); - assertThat(mgetResponse.getResponses()[2].getResponse().isExists(), is(true)); - assertThat(mgetResponse.getResponses()[2].getResponse().getId(), equalTo("3")); - - IndicesStatsResponse indicesStatsResponse = dateSensitiveGet(indicesAdmin().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3)); - assertThat(indicesStatsResponse.getIndex(index1), notNullValue()); - assertThat(indicesStatsResponse.getIndex(index2), notNullValue()); - assertThat(indicesStatsResponse.getIndex(index3), notNullValue()); - - DeleteResponse deleteResponse = dateSensitiveGet(client().prepareDelete(dateMathExp1, "1")); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - assertThat(deleteResponse.getId(), equalTo("1")); - - deleteResponse = dateSensitiveGet(client().prepareDelete(dateMathExp2, "2")); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - assertThat(deleteResponse.getId(), equalTo("2")); - - deleteResponse = dateSensitiveGet(client().prepareDelete(dateMathExp3, "3")); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - assertThat(deleteResponse.getId(), equalTo("3")); + dateSensitiveGet(prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3), response -> { + assertHitCount(response, 3); + assertSearchHits(response, "1", "2", "3"); + }); + + dateSensitiveGet(client().prepareGet(dateMathExp1, "1"), response -> { + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("1")); + }); + + dateSensitiveGet(client().prepareGet(dateMathExp2, "2"), response -> { + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("2")); + }); + + dateSensitiveGet(client().prepareGet(dateMathExp3, "3"), response -> { + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("3")); + }); + + 
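For reference, the date-math expressions exercised above resolve against UTC `now` with the same `yyyy.MM.dd` pattern the test uses to build the concrete names. A standalone sketch of that correspondence (not part of the patch):

```java
// Sketch only: how <.marvel-{now/d-1d}> lines up with the concrete index name.
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

public class DateMathNameSketch {
    public static void main(String[] args) {
        ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC);
        // {now/d-1d} rounds down to the start of the UTC day, then subtracts one day,
        // so the expression resolves to yesterday's daily index.
        String concrete = ".marvel-" + DateTimeFormatter.ofPattern("yyyy.MM.dd", Locale.ROOT).format(now.minusDays(1));
        System.out.println("<.marvel-{now/d-1d}> -> " + concrete);
    }
}
```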
dateSensitiveGet(client().prepareMultiGet().add(dateMathExp1, "1").add(dateMathExp2, "2").add(dateMathExp3, "3"), response -> { + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getId(), equalTo("1")); + assertThat(response.getResponses()[1].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[1].getResponse().getId(), equalTo("2")); + assertThat(response.getResponses()[2].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[2].getResponse().getId(), equalTo("3")); + }); + + dateSensitiveGet(indicesAdmin().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3), response -> { + assertThat(response.getIndex(index1), notNullValue()); + assertThat(response.getIndex(index2), notNullValue()); + assertThat(response.getIndex(index3), notNullValue()); + }); + + dateSensitiveGet(client().prepareDelete(dateMathExp1, "1"), response -> { + assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); + assertThat(response.getId(), equalTo("1")); + }); + + dateSensitiveGet(client().prepareDelete(dateMathExp2, "2"), response -> { + assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); + assertThat(response.getId(), equalTo("2")); + }); + + dateSensitiveGet(client().prepareDelete(dateMathExp3, "3"), response -> { + assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); + assertThat(response.getId(), equalTo("3")); + }); } public void testAutoCreateIndexWithDateMathExpression() { @@ -139,19 +147,21 @@ public void testAutoCreateIndexWithDateMathExpression() { String dateMathExp1 = "<.marvel-{now/d}>"; String dateMathExp2 = "<.marvel-{now/d-1d}>"; String dateMathExp3 = "<.marvel-{now/d-2d}>"; - client().prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); + prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); refresh(); - SearchResponse searchResponse = dateSensitiveGet(prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3)); - assertHitCount(searchResponse, 3); - assertSearchHits(searchResponse, "1", "2", "3"); + dateSensitiveGet(prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3), response -> { + assertHitCount(response, 3); + assertSearchHits(response, "1", "2", "3"); + }); - IndicesStatsResponse indicesStatsResponse = dateSensitiveGet(indicesAdmin().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3)); - assertThat(indicesStatsResponse.getIndex(index1), notNullValue()); - assertThat(indicesStatsResponse.getIndex(index2), notNullValue()); - assertThat(indicesStatsResponse.getIndex(index3), notNullValue()); + dateSensitiveGet(indicesAdmin().prepareStats(dateMathExp1, dateMathExp2, dateMathExp3), response -> { + assertThat(response.getIndex(index1), notNullValue()); + assertThat(response.getIndex(index2), notNullValue()); + assertThat(response.getIndex(index3), notNullValue()); + }); } public void testCreateIndexWithDateMathExpression() { @@ -164,15 +174,15 @@ public void testCreateIndexWithDateMathExpression() { String dateMathExp3 = "<.marvel-{now/d-2d}>"; createIndex(dateMathExp1, dateMathExp2, dateMathExp3); - GetSettingsResponse 
getSettingsResponse = dateSensitiveGet(indicesAdmin().prepareGetSettings(index1, index2, index3)); - assertEquals(dateMathExp1, getSettingsResponse.getSetting(index1, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); - assertEquals(dateMathExp2, getSettingsResponse.getSetting(index2, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); - assertEquals(dateMathExp3, getSettingsResponse.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + dateSensitiveGet(indicesAdmin().prepareGetSettings(index1, index2, index3), response -> { + assertEquals(dateMathExp1, response.getSetting(index1, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(dateMathExp2, response.getSetting(index2, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + assertEquals(dateMathExp3, response.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); + }); ClusterState clusterState = clusterAdmin().prepareState().get().getState(); assertThat(clusterState.metadata().index(index1), notNullValue()); assertThat(clusterState.metadata().index(index2), notNullValue()); assertThat(clusterState.metadata().index(index3), notNullValue()); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index db5578ee6e60b..3dd9feff9ce25 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -96,10 +96,10 @@ public void testDeletesAloneCanTriggerRefresh() throws Exception { IndexService indexService = createIndex("index", indexSettings(1, 0).put("index.refresh_interval", -1).build()); IndexShard shard = indexService.getShard(0); for (int i = 0; i < 100; i++) { - client().prepareIndex("index").setId(Integer.toString(i)).setSource("field", "value").get(); + prepareIndex("index").setId(Integer.toString(i)).setSource("field", "value").get(); } // Force merge so we know all merges are done before we start deleting: - ForceMergeResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); + ForceMergeResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).get(); assertNoFailures(r); final RefreshStats refreshStats = shard.refreshStats(); for (int i = 0; i < 100; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 7bedd163c2530..ce3fd98476725 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.search.MultiSearchRequestBuilder; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -45,6 +44,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; 
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -308,7 +308,7 @@ public void testWildcardBehaviour() throws Exception { verify(getSettings(indices).setIndicesOptions(options), false); assertAcked(prepareCreate("foobar")); - client().prepareIndex("foobar").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("foobar").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); // Verify defaults for wildcards, with one wildcard expression and one existing index indices = new String[] { "foo*" }; @@ -394,7 +394,7 @@ public void testWildcardBehaviourSnapshotRestore() throws Exception { public void testAllMissingLenient() throws Exception { createIndex("test1"); - client().prepareIndex("test1").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test1").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); assertHitCount(prepareSearch("test2").setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(matchAllQuery()), 0L); assertHitCount(prepareSearch("test2", "test3").setQuery(matchAllQuery()).setIndicesOptions(IndicesOptions.lenientExpandOpen()), 0L); // you should still be able to run empty searches without things blowing up @@ -403,12 +403,12 @@ public void testAllMissingLenient() throws Exception { public void testAllMissingStrict() throws Exception { createIndex("test1"); - expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2").setQuery(matchAllQuery()).execute().actionGet()); + expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2").setQuery(matchAllQuery()).get()); - expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2", "test3").setQuery(matchAllQuery()).execute().actionGet()); + expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2", "test3").setQuery(matchAllQuery()).get()); // you should still be able to run empty searches without things blowing up - prepareSearch().setQuery(matchAllQuery()).execute().actionGet(); + prepareSearch().setQuery(matchAllQuery()).get(); } // For now don't handle closed indices @@ -674,10 +674,11 @@ private static void verify(ActionRequestBuilder<?, ?> requestBuilder, boolean fa private static void verify(ActionRequestBuilder<?, ?> requestBuilder, boolean fail, long expectedCount) { if (fail) { if (requestBuilder instanceof MultiSearchRequestBuilder multiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); - assertThat(multiSearchResponse.getResponses().length, equalTo(1)); - assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); - assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue()); + assertResponse(multiSearchRequestBuilder, multiSearchResponse -> { + assertThat(multiSearchResponse.getResponses().length, equalTo(1)); + assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); + assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue()); + }); } else { try { requestBuilder.get(); @@ -688,9 +689,10 @@ private static void verify(ActionRequestBuilder<?, ?> requestBuilder, boolean fa if (requestBuilder instanceof SearchRequestBuilder searchRequestBuilder) { assertHitCount(searchRequestBuilder, expectedCount); } else if (requestBuilder instanceof
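In the `verify` helper above, the msearch branch now inspects the single item inside an `assertResponse` callback. In isolation, and with an assumed missing index name, the failing case looks like this:

```java
// Sketch only: the msearch failure branch of verify(...), against an assumed missing index.
import org.elasticsearch.test.ESIntegTestCase;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;

public class MultiSearchFailureSketchIT extends ESIntegTestCase {
    public void testSingleItemFailure() {
        // The msearch wrapper itself succeeds; the failure is carried per item.
        assertResponse(client().prepareMultiSearch().add(prepareSearch("no-such-index")), msearch -> {
            assertThat(msearch.getResponses().length, equalTo(1));
            assertThat(msearch.getResponses()[0].isFailure(), is(true));
            assertThat(msearch.getResponses()[0].getResponse(), nullValue());
        });
    }
}
```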
MultiSearchRequestBuilder multiSearchRequestBuilder) { - MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); - assertThat(multiSearchResponse.getResponses().length, equalTo(1)); - assertThat(multiSearchResponse.getResponses()[0].getResponse(), notNullValue()); + assertResponse(multiSearchRequestBuilder, response -> { + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse(), notNullValue()); + }); } else { requestBuilder.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index b10d4147af25c..0b99e3ba3ffcf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; @@ -35,7 +34,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -59,23 +59,8 @@ public void testCacheAggs() throws Exception { // This is not a random example: serialization with time zones writes shared strings // which used to not work well with the query cache because of the handles stream output // see #9500 - final SearchResponse r1 = client.prepareSearch("index") - .setSize(0) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation( - dateHistogram("histo").field("f").timeZone(ZoneId.of("+01:00")).minDocCount(0).calendarInterval(DateHistogramInterval.MONTH) - ) - .get(); - assertNoFailures(r1); - - // The cached is actually used - assertThat( - indicesAdmin().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), - greaterThan(0L) - ); - - for (int i = 0; i < 10; ++i) { - final SearchResponse r2 = client.prepareSearch("index") + assertNoFailuresAndResponse( + client.prepareSearch("index") .setSize(0) .setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation( @@ -83,21 +68,42 @@ public void testCacheAggs() throws Exception { .timeZone(ZoneId.of("+01:00")) .minDocCount(0) .calendarInterval(DateHistogramInterval.MONTH) - ) - .get(); - assertNoFailures(r2); - Histogram h1 = r1.getAggregations().get("histo"); - Histogram h2 = r2.getAggregations().get("histo"); - final List buckets1 = h1.getBuckets(); - final List buckets2 = h2.getBuckets(); - assertEquals(buckets1.size(), buckets2.size()); - for (int j = 0; j < buckets1.size(); ++j) { - final Bucket b1 = buckets1.get(j); - final Bucket b2 = buckets2.get(j); - assertEquals(b1.getKey(), b2.getKey()); - assertEquals(b1.getDocCount(), 
b2.getDocCount()); + ), + r1 -> { + // The cache is actually used + assertThat( + indicesAdmin().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), + greaterThan(0L) + ); + + for (int i = 0; i < 10; ++i) { + assertNoFailuresAndResponse( + client.prepareSearch("index") + .setSize(0) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .addAggregation( + dateHistogram("histo").field("f") + .timeZone(ZoneId.of("+01:00")) + .minDocCount(0) + .calendarInterval(DateHistogramInterval.MONTH) + ), + r2 -> { + Histogram h1 = r1.getAggregations().get("histo"); + Histogram h2 = r2.getAggregations().get("histo"); + final List<? extends Bucket> buckets1 = h1.getBuckets(); + final List<? extends Bucket> buckets2 = h2.getBuckets(); + assertEquals(buckets1.size(), buckets2.size()); + for (int j = 0; j < buckets1.size(); ++j) { + final Bucket b1 = buckets1.get(j); + final Bucket b2 = buckets2.get(j); + assertEquals(b1.getKey(), b2.getKey()); + assertEquals(b1.getDocCount(), b2.getDocCount()); + } + } + ); + } } - } + ); } public void testQueryRewrite() throws Exception { @@ -133,35 +139,43 @@ public void testQueryRewrite() throws Exception { assertCacheState(client, "index", 0, 0); - final SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")) - // to ensure that query is executed even if it rewrites to match_no_docs - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")) + // to ensure that query is executed even if it rewrites to match_no_docs + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 5); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); - final SearchResponse r2 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 3, 7); - - final SearchResponse r3 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")) - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")) +
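The cached-aggregation comparison above nests one `assertNoFailuresAndResponse` inside another so the first response stays live while each repeat is checked against it. The skeleton of that shape, with the actual date_histogram request abstracted away:

```java
// Sketch only: nested-callback comparison; searchRequest() stands in for the real
// aggregation request used in the test.
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.test.ESIntegTestCase;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;

public class NestedResponseCompareSketchIT extends ESIntegTestCase {
    private SearchRequestBuilder searchRequest() {
        return prepareSearch("index").setSize(0); // assumed request shape
    }

    public void testRepeatedRequestsAgree() {
        // The outer response (r1) stays pinned while each inner response (r2) is compared to it.
        assertNoFailuresAndResponse(searchRequest(), r1 ->
            assertNoFailuresAndResponse(searchRequest(), r2 ->
                assertEquals(r1.getHits().getTotalHits().value, r2.getHits().getTotalHits().value)));
    }
}
```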
.addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 6, 9); } @@ -195,31 +209,40 @@ public void testQueryRewriteMissingValues() throws Exception { assertCacheState(client, "index", 0, 0); - final SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index", 0, 1); - final SearchResponse r2 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index", 1, 1); - final SearchResponse r3 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index", 2, 1); } @@ -253,35 +276,44 @@ public void testQueryRewriteDates() throws Exception { assertCacheState(client, "index", 0, 0); - final SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) - // to ensure that query is executed even if it rewrites to match_no_docs - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(9L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + // to ensure that query is executed even if it rewrites to match_no_docs + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(9L)); + } + ); assertCacheState(client, 
"index", 0, 1); - final SearchResponse r2 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(9L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(9L)); + } + ); assertCacheState(client, "index", 1, 1); - final SearchResponse r3 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) - .addAggregation(new GlobalAggregationBuilder("global")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(9L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + .addAggregation(new GlobalAggregationBuilder("global")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(9L)); + } + ); assertCacheState(client, "index", 2, 1); } @@ -324,13 +356,16 @@ public void testQueryRewriteDatesWithNow() throws Exception { assertCacheState(client, "index-2", 0, 0); assertCacheState(client, "index-3", 0, 0); - final SearchResponse r1 = client.prepareSearch("index-*") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index-*") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index-1", 0, 1); assertCacheState(client, "index-2", 0, 1); // Because the query will INTERSECT with the 3rd index it will not be @@ -338,24 +373,30 @@ public void testQueryRewriteDatesWithNow() throws Exception { // cache miss or cache hit since queries containing now can't be cached assertCacheState(client, "index-3", 0, 0); - final SearchResponse r2 = client.prepareSearch("index-*") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index-*") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index-1", 1, 1); assertCacheState(client, 
"index-2", 1, 1); assertCacheState(client, "index-3", 0, 0); - final SearchResponse r3 = client.prepareSearch("index-*") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(8L)); + assertResponse( + client.prepareSearch("index-*") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + } + ); assertCacheState(client, "index-1", 2, 1); assertCacheState(client, "index-2", 2, 1); assertCacheState(client, "index-3", 0, 0); @@ -391,70 +432,88 @@ public void testCanCache() throws Exception { assertCacheState(client, "index", 0, 0); // If size > 0 we should no cache by default - final SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(1) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(1) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 0); // If search type is DFS_QUERY_THEN_FETCH we should not cache - final SearchResponse r2 = client.prepareSearch("index") - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r2); - assertThat(r2.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 0); // If search type is DFS_QUERY_THEN_FETCH we should not cache even if // the cache flag is explicitly set on the request - final SearchResponse r3 = client.prepareSearch("index") - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setSize(0) - .setRequestCache(true) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r3); - assertThat(r3.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setSize(0) + .setRequestCache(true) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 0); // If the request has an non-filter aggregation containing now we should not cache - final SearchResponse r5 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - 
.setRequestCache(true) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r5); - assertThat(r5.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setRequestCache(true) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 0); // If size > 1 and cache flag is set on the request we should cache - final SearchResponse r6 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(1) - .setRequestCache(true) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r6); - assertThat(r6.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(1) + .setRequestCache(true) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 2); // If the request has a filter aggregation containing now we should cache since it gets rewritten - final SearchResponse r4 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setRequestCache(true) - .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) - .addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r4); - assertThat(r4.getHits().getTotalHits().value, equalTo(7L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setRequestCache(true) + .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(7L)); + } + ); assertCacheState(client, "index", 0, 4); } @@ -476,32 +535,40 @@ public void testCacheWithFilteredAlias() { assertCacheState(client, "index", 0, 0); - SearchResponse r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + } + ); assertCacheState(client, "index", 0, 1); - r1 = client.prepareSearch("index") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setSize(0) - 
.setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")) - .get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + client.prepareSearch("index") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setSize(0) + .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + } + ); assertCacheState(client, "index", 1, 1); - r1 = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); + assertResponse(client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0), response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + }); assertCacheState(client, "index", 1, 2); - r1 = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); - ElasticsearchAssertions.assertAllSuccessful(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); + assertResponse(client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0), response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + }); assertCacheState(client, "index", 2, 2); } @@ -519,14 +586,13 @@ public void testProfileDisableCache() throws Exception { int expectedMisses = 0; for (int i = 0; i < 5; i++) { boolean profile = i % 2 == 0; - SearchResponse resp = client.prepareSearch("index") - .setRequestCache(true) - .setProfile(profile) - .setQuery(QueryBuilders.termQuery("k", "hello")) - .get(); - assertNoFailures(resp); - ElasticsearchAssertions.assertAllSuccessful(resp); - assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + assertNoFailuresAndResponse( + client.prepareSearch("index").setRequestCache(true).setProfile(profile).setQuery(QueryBuilders.termQuery("k", "hello")), + response -> { + ElasticsearchAssertions.assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + } + ); if (profile == false) { if (i == 1) { expectedMisses++; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java index d92bb64d4fd9a..0e6cd665b6951 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java @@ -86,7 +86,7 @@ public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception int amountOfIndicesToClose = randomInt(numIndices - 1); for (int i = 0; i < amountOfIndicesToClose; i++) { String indexName = indexNames.get(i); - indicesAdmin().prepareClose(indexName).execute().actionGet(); + indicesAdmin().prepareClose(indexName).get(); } ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java index 70234d81feadd..1d41641d027a5 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java @@ -49,7 +49,7 @@ public void testWaitIfOngoing() throws InterruptedException { final int numIters = scaledRandomIntBetween(10, 30); for (int i = 0; i < numIters; i++) { for (int j = 0; j < 10; j++) { - client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setSource("{}", XContentType.JSON).get(); } final CountDownLatch latch = new CountDownLatch(10); final CopyOnWriteArrayList<Throwable> errors = new CopyOnWriteArrayList<>(); @@ -87,7 +87,7 @@ public void testRejectIllegalFlushParameters() { createIndex("test"); int numDocs = randomIntBetween(0, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setSource("{}", XContentType.JSON).get(); } assertThat( expectThrows( @@ -124,7 +124,7 @@ public void testFlushOnInactive() throws Exception { ensureGreen(indexName); int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex(indexName).setSource("f", "v").get(); + prepareIndex(indexName).setSource("f", "v").get(); } if (randomBoolean()) { internalCluster().restartNode(randomFrom(dataNodes)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java index 7541bce29fbe9..ec5d9876b7703 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java @@ -58,21 +58,18 @@ public void testConcurrentDynamicMapping() throws Exception { for (int j = 0; j < numDocs; j++) { Map<String, Object> source = new HashMap<>(); source.put(fieldName, "test-user"); - client().prepareIndex("test") - .setId(Integer.toString(currentID++)) - .setSource(source) - .execute(new ActionListener<DocWriteResponse>() { - @Override - public void onResponse(DocWriteResponse response) { - latch.countDown(); - } + prepareIndex("test").setId(Integer.toString(currentID++)).setSource(source).execute(new ActionListener<DocWriteResponse>() { + @Override + public void onResponse(DocWriteResponse response) { + latch.countDown(); + } - @Override - public void onFailure(Exception e) { - throwable.add(e); - latch.countDown(); - } - }); + @Override + public void onFailure(Exception e) { + throwable.add(e); + latch.countDown(); + } + }); } latch.await(); assertThat(throwable, emptyIterable()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java index 7a9aa7a47215a..8d7311e4f7619 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java @@ -58,7 +58,7 @@ public void testBWCMalformedDynamicTemplate() { .put("index.version.created", IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0)) ).setMapping(mapping) ); - client().prepareIndex(indexName).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + prepareIndex(indexName).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
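The reflowed listener block above is the standard async-indexing test pattern: count a latch down on both completion paths and collect failures for one final assertion. The same pattern, compacted with `ActionListener.wrap` (an equivalent spelling, not what the file itself uses):

```java
// Sketch only: the latch-and-collect async pattern via ActionListener.wrap.
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.test.ESIntegTestCase;

import static org.hamcrest.Matchers.emptyIterable;

public class AsyncIndexLatchSketchIT extends ESIntegTestCase {
    public void testAsyncIndexing() throws Exception {
        createIndex("test");
        CountDownLatch latch = new CountDownLatch(1);
        CopyOnWriteArrayList<Exception> failures = new CopyOnWriteArrayList<>();
        // Count down on both paths so latch.await() can never hang on a failure.
        prepareIndex("test").setId("1").setSource("field", "test-user").execute(ActionListener.wrap(r -> latch.countDown(), e -> {
            failures.add(e);
            latch.countDown();
        }));
        latch.await();
        assertThat(failures, emptyIterable());
    }
}
```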
assertNoFailures((indicesAdmin().prepareRefresh(indexName)).get()); assertHitCount(prepareSearch(indexName), 1); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java index 33b80a4a4f3b7..720f48754519b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java @@ -41,7 +41,7 @@ protected Collection> nodePlugins() { public void testGetMappingsWhereThereAreNone() { createIndex("index"); - GetMappingsResponse response = indicesAdmin().prepareGetMappings().execute().actionGet(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings().get(); assertThat(response.mappings().containsKey("index"), equalTo(true)); assertEquals(MappingMetadata.EMPTY_MAPPINGS, response.mappings().get("index")); } @@ -59,42 +59,41 @@ private XContentBuilder getMappingForType() throws IOException { } public void testSimpleGetMappings() throws Exception { - indicesAdmin().prepareCreate("indexa").setMapping(getMappingForType()).execute().actionGet(); - indicesAdmin().prepareCreate("indexb").setMapping(getMappingForType()).execute().actionGet(); + indicesAdmin().prepareCreate("indexa").setMapping(getMappingForType()).get(); + indicesAdmin().prepareCreate("indexb").setMapping(getMappingForType()).get(); ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); // Get all mappings - GetMappingsResponse response = indicesAdmin().prepareGetMappings().execute().actionGet(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings().get(); assertThat(response.mappings().size(), equalTo(2)); assertThat(response.mappings().get("indexa"), notNullValue()); assertThat(response.mappings().get("indexb"), notNullValue()); // Get all mappings, via wildcard support - response = indicesAdmin().prepareGetMappings("*").execute().actionGet(); + response = indicesAdmin().prepareGetMappings("*").get(); assertThat(response.mappings().size(), equalTo(2)); assertThat(response.mappings().get("indexa"), notNullValue()); assertThat(response.mappings().get("indexb"), notNullValue()); // Get mappings in indexa - response = indicesAdmin().prepareGetMappings("indexa").execute().actionGet(); + response = indicesAdmin().prepareGetMappings("indexa").get(); assertThat(response.mappings().size(), equalTo(1)); assertThat(response.mappings().get("indexa"), notNullValue()); } public void testGetMappingsWithBlocks() throws IOException { - indicesAdmin().prepareCreate("test").setMapping(getMappingForType()).execute().actionGet(); + indicesAdmin().prepareCreate("test").setMapping(getMappingForType()).get(); ensureGreen(); for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { try { enableIndexBlock("test", block); - GetMappingsResponse response = indicesAdmin().prepareGetMappings().execute().actionGet(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings().get(); assertThat(response.mappings().size(), equalTo(1)); assertNotNull(response.mappings().get("test")); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 0eca3d689903e..0439fe6f67fb5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; @@ -48,6 +47,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -62,9 +62,8 @@ protected Collection> nodePlugins() { public void testDynamicUpdates() throws Exception { indicesAdmin().prepareCreate("test") .setSettings(indexSettings(1, 0).put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE)) - .execute() - .actionGet(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + .get(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); updateClusterSettings( Settings.builder().put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(5)) ); @@ -75,8 +74,7 @@ public void testDynamicUpdates() throws Exception { String type = "type"; String fieldName = "field_" + type + "_" + rec; indexRequests.add( - client().prepareIndex("test") - .setId(Integer.toString(rec)) + prepareIndex("test").setId(Integer.toString(rec)) .setTimeout(TimeValue.timeValueMinutes(5)) .setSource(fieldName, "some_value") ); @@ -84,10 +82,9 @@ public void testDynamicUpdates() throws Exception { indexRandom(true, false, indexRequests); logger.info("checking all the documents are there"); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh().execute().actionGet(); + RefreshResponse refreshResponse = indicesAdmin().prepareRefresh().get(); assertThat(refreshResponse.getFailedShards(), equalTo(0)); - SearchResponse response = prepareSearch("test").setSize(0).execute().actionGet(); - assertThat(response.getHits().getTotalHits().value, equalTo((long) recCount)); + assertHitCount(prepareSearch("test").setSize(0), recCount); logger.info("checking all the fields are in the mappings"); @@ -103,30 +100,30 @@ public void testDynamicUpdates() throws Exception { public void testUpdateMappingWithoutType() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping(""" {"properties":{"body":{"type":"text"}}} - """).execute().actionGet(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + """).get(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); 
AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" {"properties":{"date":{"type":"integer"}}} - """, XContentType.JSON).execute().actionGet(); + """, XContentType.JSON).get(); assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); - GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").execute().actionGet(); + GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get(); assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo(""" {"_doc":{"properties":{"body":{"type":"text"},"date":{"type":"integer"}}}}""")); } public void testUpdateMappingWithoutTypeMultiObjects() { createIndex("test", 1, 0); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" - {"properties":{"date":{"type":"integer"}}}""", XContentType.JSON).execute().actionGet(); + {"properties":{"date":{"type":"integer"}}}""", XContentType.JSON).get(); assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); - GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").execute().actionGet(); + GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get(); assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo(""" {"_doc":{"properties":{"date":{"type":"integer"}}}}""")); } @@ -134,13 +131,13 @@ public void testUpdateMappingWithoutTypeMultiObjects() { public void testUpdateMappingWithConflicts() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(2, 0)).setMapping(""" {"properties":{"body":{"type":"text"}}} - """).execute().actionGet(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + """).get(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); try { indicesAdmin().preparePutMapping("test").setSource(""" {"_doc":{"properties":{"body":{"type":"integer"}}}} - """, XContentType.JSON).execute().actionGet(); + """, XContentType.JSON).get(); fail("Expected MergeMappingException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [body] cannot be changed from type [text] to [integer]")); @@ -150,11 +147,11 @@ public void testUpdateMappingWithConflicts() { public void testUpdateMappingWithNormsConflicts() { indicesAdmin().prepareCreate("test").setMapping(""" {"properties":{"body":{"type":"text", "norms": false }}} - """).execute().actionGet(); + """).get(); try { indicesAdmin().preparePutMapping("test").setSource(""" {"_doc":{"properties":{"body":{"type":"text", "norms": true }}}} - """, XContentType.JSON).execute().actionGet(); + """, XContentType.JSON).get(); fail("Expected MergeMappingException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Cannot update parameter [norms] from [false] to [true]")); @@ -166,12 +163,12 @@ public void testUpdateMappingWithNormsConflicts() { */ public void testUpdateMappingNoChanges() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(2, 0)).setMapping(""" - {"properties":{"body":{"type":"text"}}}""").execute().actionGet(); - 
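The conflict tests above keep their try/fail/catch structure and only swap `execute().actionGet()` for `get()`; with `expectThrows` the first of them could be sketched as:

```java
// Sketch only: the same text -> integer mapping conflict via expectThrows.
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xcontent.XContentType;

import static org.hamcrest.Matchers.containsString;

public class MappingConflictSketchIT extends ESIntegTestCase {
    public void testTypeChangeIsRejected() {
        indicesAdmin().prepareCreate("test").setMapping("""
            {"properties":{"body":{"type":"text"}}}
            """).get();
        IllegalArgumentException e = expectThrows(
            IllegalArgumentException.class,
            () -> indicesAdmin().preparePutMapping("test").setSource("""
                {"_doc":{"properties":{"body":{"type":"integer"}}}}
                """, XContentType.JSON).get()
        );
        assertThat(e.getMessage(), containsString("mapper [body] cannot be changed from type [text] to [integer]"));
    }
}
```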
clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + {"properties":{"body":{"type":"text"}}}""").get(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" {"_doc":{"properties":{"body":{"type":"text"}}}} - """, XContentType.JSON).execute().actionGet(); + """, XContentType.JSON).get(); // no changes, we return assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index a1068654daef2..e726c8a08002a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -51,6 +50,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; @@ -144,9 +144,9 @@ public void testMemoryBreaker() throws Exception { SearchRequestBuilder searchRequest = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC); String errMsg = "Data too large, data for [test] would be"; - assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg)); + assertFailures(searchRequest, RestStatus.TOO_MANY_REQUESTS, containsString(errMsg)); errMsg = "which is larger than the limit of [100/100b]"; - assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg)); + assertFailures(searchRequest, RestStatus.TOO_MANY_REQUESTS, containsString(errMsg)); NodesStatsResponse stats = client.admin().cluster().prepareNodesStats().setBreaker(true).get(); long breaks = 0; @@ -210,9 +210,9 @@ public void testRamAccountingTermsEnum() throws Exception { SearchRequestBuilder searchRequest = client.prepareSearch("ramtest").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC); String errMsg = "Data too large, data for [test] would be"; - assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg)); + assertFailures(searchRequest, RestStatus.TOO_MANY_REQUESTS, containsString(errMsg)); errMsg = "which is larger than the limit of [100/100b]"; - assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg)); + assertFailures(searchRequest, RestStatus.TOO_MANY_REQUESTS, containsString(errMsg)); NodesStatsResponse stats = 
client.admin().cluster().prepareNodesStats().setBreaker(true).get(); long breaks = 0; @@ -277,11 +277,10 @@ public void testAggTookTooMuch() throws Exception { // A terms aggregation on the "test" field should trip the bucket circuit breaker try { - SearchResponse resp = client.prepareSearch("cb-test") - .setQuery(matchAllQuery()) - .addAggregation(terms("my_terms").field("test")) - .get(); - assertTrue("there should be shard failures", resp.getFailedShards() > 0); + assertResponse( + client.prepareSearch("cb-test").setQuery(matchAllQuery()).addAggregation(terms("my_terms").field("test")), + response -> assertTrue("there should be shard failures", response.getFailedShards() > 0) + ); fail("aggregation should have tripped the breaker"); } catch (Exception e) { Throwable cause = e.getCause(); @@ -293,7 +292,7 @@ public void testAggTookTooMuch() throws Exception { /** Issues a cache clear and waits 30 seconds for the field data breaker to be cleared */ public void clearFieldData() throws Exception { - indicesAdmin().prepareClearCache().setFieldDataCache(true).execute().actionGet(); + indicesAdmin().prepareClearCache().setFieldDataCache(true).get(); assertBusy(() -> { NodesStatsResponse resp = clusterAdmin().prepareNodesStats().clear().setBreaker(true).get(new TimeValue(15, TimeUnit.SECONDS)); for (NodeStats nStats : resp.getNodes()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 5958f1ad57932..6a52159c71ab9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -61,7 +61,7 @@ protected boolean addMockInternalEngine() { } public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException { - for (NodeStats node : clusterAdmin().prepareNodesStats().clear().setBreaker(true).execute().actionGet().getNodes()) { + for (NodeStats node : clusterAdmin().prepareNodesStats().clear().setBreaker(true).get().getNodes()) { assertThat("Breaker is not set to 0", node.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L)); } @@ -108,7 +108,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate) .put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d); logger.info("creating index: [test] using settings: [{}]", settings.build()); - CreateIndexResponse response = indicesAdmin().prepareCreate("test").setSettings(settings).setMapping(mapping).execute().actionGet(); + CreateIndexResponse response = indicesAdmin().prepareCreate("test").setSettings(settings).setMapping(mapping).get(); final int numDocs; if (response.isShardsAcknowledged() == false) { /* some seeds just won't let you create the index at all and we enter a ping-pong mode @@ -126,8 +126,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc } for (int i = 0; i < numDocs; i++) { try { - client().prepareIndex("test") - .setId("" + i) + prepareIndex("test").setId("" + i) .setTimeout(TimeValue.timeValueSeconds(1)) .setSource("test-str", randomUnicodeOfLengthBetween(5, 25), "test-num", i) .get(); @@ -146,7 +145,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc 
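The assertResponse rewrite above replaces a locally held SearchResponse with a callback, which lets the test framework release the response after the assertions run (this rationale is inferred from the pattern; the diff itself does not state it). Usage as in the hunk above, assuming the same test context:

    // The builder is executed by assertResponse; the lambda sees the live response.
    assertResponse(
        client.prepareSearch("cb-test").setQuery(matchAllQuery()).addAggregation(terms("my_terms").field("test")),
        response -> assertTrue("there should be shard failures", response.getFailedShards() > 0)
    );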
refreshResponse.getTotalShards() ); final int numSearches = scaledRandomIntBetween(50, 150); - NodesStatsResponse resp = clusterAdmin().prepareNodesStats().clear().setBreaker(true).execute().actionGet(); + NodesStatsResponse resp = clusterAdmin().prepareNodesStats().clear().setBreaker(true).get(); for (NodeStats stats : resp.getNodes()) { assertThat("Breaker is set to 0", stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L)); } @@ -172,7 +171,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc // breaker adjustment code, it should show up here by the breaker // estimate being either positive or negative. ensureGreen("test"); // make sure all shards are there - there could be shards that are still starting up. - assertAllSuccessful(indicesAdmin().prepareClearCache("test").setFieldDataCache(true).execute().actionGet()); + assertAllSuccessful(indicesAdmin().prepareClearCache("test").setFieldDataCache(true).get()); // Since .cleanUp() is no longer called on cache clear, we need to call it on each node manually for (String node : internalCluster().getNodeNames()) { @@ -181,7 +180,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc // Clean up the cache, ensuring that entries' listeners have been called fdCache.getCache().refresh(); } - NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().clear().setBreaker(true).execute().actionGet(); + NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().clear().setBreaker(true).get(); for (NodeStats stats : nodeStats.getNodes()) { assertThat( "Breaker reset to 0 last search success: " + success + " mapping: " + mapping, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index cdd77d5864a7b..0fe5845e9ed32 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -40,11 +40,11 @@ public void testPrimaryRelocationWhileIndexing() throws Exception { @Override public void run() { while (finished.get() == false && numAutoGenDocs.get() < 10_000) { - DocWriteResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); + DocWriteResponse indexResponse = prepareIndex("test").setId("id").setSource("field", "value").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - client().prepareIndex("test").setSource("auto", true).get(); + prepareIndex("test").setSource("auto", true).get(); numAutoGenDocs.incrementAndGet(); } } @@ -64,14 +64,12 @@ public void run() { logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName()); clusterAdmin().prepareReroute() .add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId())) - .execute() - .actionGet(); + .get(); ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setTimeout(TimeValue.timeValueSeconds(60)) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) - .execute() - .actionGet(); + .get(); if (clusterHealthResponse.isTimedOut()) { 
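The client().prepareIndex(index) to prepareIndex(index) change is a pure call-site shortening. A hypothetical reconstruction of the helper it relies on; the real definition lives in the shared test base class, so treat the exact signature as an assumption:

    // Hypothetical sketch of the base-class helper; not copied from the repository.
    protected static IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index);
    }

    // Call sites then shrink to:
    prepareIndex("test").setId("id").setSource("field", "value").get();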
final String hotThreads = clusterAdmin().prepareNodesHotThreads() .setIgnoreIdleThreads(false) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index aee3d3680155e..762bbdda77df1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -281,14 +281,10 @@ private void unthrottleRecovery() { */ public void startShardRecovery(String sourceNode, String targetNode) throws Exception { logger.info("--> updating cluster settings with moving shard from node `{}` to node `{}`", sourceNode, targetNode); - clusterAdmin().prepareReroute() - .add(new MoveAllocationCommand(INDEX_NAME, 0, sourceNode, targetNode)) - .execute() - .actionGet() - .getState(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, sourceNode, targetNode)).get().getState(); logger.info("--> requesting shard recovery"); - indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + indicesAdmin().prepareRecoveries(INDEX_NAME).get(); logger.info("--> waiting for recovery to begin on both the source and target nodes"); final Index index = resolveIndex(INDEX_NAME); @@ -353,7 +349,7 @@ public void testGatewayRecovery() throws Exception { ensureGreen(); logger.info("--> request recoveries"); - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT_1)); assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1)); @@ -378,7 +374,7 @@ public void testGatewayRecoveryTestActiveOnly() throws Exception { ensureGreen(); logger.info("--> request recoveries"); - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).setActiveOnly(true).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).setActiveOnly(true).get(); List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(0)); // Should not expect any responses back @@ -408,7 +404,7 @@ public void testReplicaRecovery() throws Exception { setReplicaCount(1, INDEX_NAME); ensureGreen(INDEX_NAME); - final RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + final RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); // we should now have two total shards, one primary and one replica List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); @@ -461,8 +457,14 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio int numDocs = randomIntBetween(10, 200); final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex(INDEX_NAME) - .setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()); + docs[i] = prepareIndex(INDEX_NAME).setSource( + "foo-int", + randomInt(), + "foo-string", + randomAlphaOfLength(32), + "foo-float", + randomFloat() + ); } indexRandom(randomBoolean(), docs); @@ -502,7 +504,7 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio public Settings onNodeStopped(String nodeName) 
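For reference, the reroute-then-wait idiom that the relocation tests above keep applying, assembled from the calls in these hunks (relocationSource and relocationTarget are the DiscoveryNodes the test picks):

    // Ask the allocator to move shard 0 of "test", then block until no shard is relocating.
    clusterAdmin().prepareReroute()
        .add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId()))
        .get();
    ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth()
        .setTimeout(TimeValue.timeValueSeconds(60))
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForNoRelocatingShards(true)
        .get();
    assertFalse("relocation should finish within the timeout", clusterHealthResponse.isTimedOut());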
throws Exception { safeAwait(phase1ReadyBlocked); // nodeB stopped, peer recovery from nodeA to nodeC, it will be cancelled after nodeB get started. - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); List nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates); @@ -551,7 +553,7 @@ public void testRerouteRecovery() throws Exception { throttleRecovery10Seconds(shardSize); logger.info("--> move shard from: {} to: {}", nodeA, nodeB); - clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB)).execute().actionGet().getState(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB)).get().getState(); logger.info("--> waiting for recovery to start both on source and target"); final Index index = resolveIndex(INDEX_NAME); @@ -563,7 +565,7 @@ public void testRerouteRecovery() throws Exception { }); logger.info("--> request recoveries"); - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); List recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); List nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); @@ -600,7 +602,7 @@ public void testRerouteRecovery() throws Exception { // wait for it to be finished ensureGreen(); - response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(1)); @@ -637,9 +639,9 @@ public void testRerouteRecovery() throws Exception { throttleRecovery10Seconds(shardSize); logger.info("--> move replica shard from: {} to: {}", nodeA, nodeC); - clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC)).execute().actionGet().getState(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC)).get().getState(); - response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); @@ -664,7 +666,7 @@ public void testRerouteRecovery() throws Exception { internalCluster().stopNode(nodeA); ensureStableCluster(2); - response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); @@ -685,7 +687,7 @@ public void testRerouteRecovery() throws Exception { unthrottleRecovery(); ensureGreen(); - response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates); @@ -853,20 +855,19 @@ public void testSnapshotRecovery() throws Exception { logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = createSnapshot(INDEX_NAME); - 
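The recovery assertions above all follow one shape: request the per-index recovery states, then inspect the returned list. Condensed from the hunks above (the List element type, RecoveryState, is implied by the surrounding code):

    RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get();
    assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT_1));
    List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
    assertThat(recoveryStates.size(), equalTo(1));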
indicesAdmin().prepareClose(INDEX_NAME).execute().actionGet(); + indicesAdmin().prepareClose(INDEX_NAME).get(); logger.info("--> restore"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, SNAP_NAME) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); int totalShards = restoreSnapshotResponse.getRestoreInfo().totalShards(); assertThat(totalShards, greaterThan(0)); ensureGreen(); logger.info("--> request recoveries"); - RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).execute().actionGet(); + RecoveryResponse response = indicesAdmin().prepareRecoveries(INDEX_NAME).get(); Repository repository = internalCluster().getAnyMasterNodeInstance(RepositoriesService.class).repository(REPO_NAME); final RepositoryData repositoryData = AbstractSnapshotIntegTestCase.getRepositoryData(repository); @@ -920,14 +921,20 @@ private IndicesStatsResponse createAndPopulateIndex(String name, int nodeCount, final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex(name) - .setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()); + docs[i] = prepareIndex(name).setSource( + "foo-int", + randomInt(), + "foo-string", + randomAlphaOfLength(32), + "foo-float", + randomFloat() + ); } indexRandom(true, docs); flush(); assertThat(prepareSearch(name).setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs)); - return indicesAdmin().prepareStats(name).execute().actionGet(); + return indicesAdmin().prepareStats(name).get(); } private void validateIndexRecoveryState(RecoveryState.Index indexState) { @@ -986,7 +993,7 @@ public void testHistoryRetention() throws Exception { final List requests = new ArrayList<>(); final int replicatedDocCount = scaledRandomIntBetween(25, 250); while (requests.size() < replicatedDocCount) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); if (randomBoolean()) { @@ -1008,7 +1015,7 @@ public void testHistoryRetention() throws Exception { final int numNewDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numNewDocs; i++) { - client().prepareIndex(indexName).setSource("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex(indexName).setSource("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); } // Flush twice to update the safe commit's local checkpoint assertThat(indicesAdmin().prepareFlush(indexName).setForce(true).execute().get().getFailedShards(), equalTo(0)); @@ -1040,10 +1047,7 @@ public void testDoNotInfinitelyWaitForMapping() { indicesAdmin().preparePutMapping("test").setSource("test_field", "type=text,analyzer=test_analyzer").get(); int numDocs = between(1, 10); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test") - .setId("u" + i) - .setSource(singletonMap("test_field", Integer.toString(i)), XContentType.JSON) - .get(); + prepareIndex("test").setId("u" + i).setSource(singletonMap("test_field", Integer.toString(i)), XContentType.JSON).get(); } Semaphore recoveryBlocked = new Semaphore(1); for (DiscoveryNode node : clusterService().state().nodes()) { @@ -1143,7 +1147,7 @@ public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, numDocs).mapToObj(n -> 
client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, numDocs).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); indicesAdmin().prepareRefresh(indexName).get(); // avoid refresh when we are failing a shard String failingNode = randomFrom(nodes); @@ -1234,7 +1238,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() throws Exception randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); @@ -1298,7 +1302,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() t randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); @@ -1329,9 +1333,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(1, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, between(1, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); // We do not guarantee that the replica can recover locally all the way to its own global checkpoint before starting @@ -1384,7 +1386,7 @@ public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonabl randomBoolean(), false, randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); @@ -1465,9 +1467,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, newDocCount) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, newDocCount).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); flush(indexName); @@ -1509,7 +1509,7 @@ public void testDoesNotCopyOperationsInSafeCommit() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); final ShardId shardId = new ShardId(resolveIndex(indexName), 0); @@ -1528,7 +1528,7 @@ public void testDoesNotCopyOperationsInSafeCommit() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()) + IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); setReplicaCount(1, indexName); @@ -1585,9 +1585,7 @@ public void testRepeatedRecovery() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, randomIntBetween(0, 10)) - .mapToObj(n -> 
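Several hunks here merely collapse the IntStream pipeline that generates the random documents onto one line; the pattern itself is unchanged. Spelled out, with the caveat that the flag-name comments are assumptions based on the usual ESIntegTestCase.indexRandom signature:

    indexRandom(
        randomBoolean(),   // forceRefresh (assumed flag name)
        false,             // dummyDocuments (assumed flag name)
        randomBoolean(),   // maybeFlush (assumed flag name)
        IntStream.range(0, between(0, 100)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList())
    );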
client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 10)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1615,9 +1613,7 @@ public void testRepeatedRecovery() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, randomIntBetween(0, 10)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 10)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); logger.info("--> add replicas again"); @@ -1635,7 +1631,7 @@ public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() throws Exception { .setSettings(indexSettings(1, 1).put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), randomBoolean())) ); final List indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> prepareIndex(indexName).setSource("foo", "bar")) .toList(); indexRandom(randomBoolean(), true, true, indexRequests); ensureGreen(); @@ -1681,9 +1677,10 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception { indexers[i] = new Thread(() -> { while (stopped.get() == false) { try { - DocWriteResponse response = client().prepareIndex(indexName) - .setSource(Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) - .get(); + DocWriteResponse response = prepareIndex(indexName).setSource( + Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), + XContentType.JSON + ).get(); assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); } catch (ElasticsearchException ignored) {} } @@ -1726,7 +1723,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { createIndex(indexName, indexSettings(1, 0).put("index.routing.allocation.include._name", String.join(",", dataNodes)).build()); ensureGreen(indexName); final List indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> prepareIndex(indexName).setSource("foo", "bar")) .toList(); indexRandom(randomBoolean(), true, true, indexRequests); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1800,7 +1797,7 @@ public void testWaitForClusterStateToBeAppliedOnSourceNode() throws Exception { createIndex(indexName, indexSettings(1, 0).build()); ensureGreen(indexName); final List indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> prepareIndex(indexName).setSource("foo", "bar")) .toList(); indexRandom(randomBoolean(), true, true, indexRequests); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1870,7 +1867,7 @@ public void testDeleteIndexDuringFinalization() throws Exception { createIndex(indexName, indexSettings(1, 0).build()); ensureGreen(indexName); final List indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> prepareIndex(indexName).setSource("foo", "bar")) .toList(); indexRandom(randomBoolean(), true, true, indexRequests); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1900,7 +1897,7 @@ public void 
testDeleteIndexDuringFinalization() throws Exception { // Process the TRANSLOG_OPS response on the replica (avoiding failing it due to a concurrent delete) but // before sending the response back send another document to the primary, advancing the GCP to prevent the replica // being marked as in-sync (NB below we delay the replica write until after the index is deleted) - client().prepareIndex(indexName).setSource("foo", "baz").execute(ActionListener.noop()); + prepareIndex(indexName).setSource("foo", "baz").execute(ActionListener.noop()); primaryIndexShard.addGlobalCheckpointListener( globalCheckpointBeforeRecovery + 1, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index e34d5059b4991..30c57873fc6b1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -266,39 +266,10 @@ public void testFailingReposAreTreatedAsNonExistingShardSnapshots() throws Excep } } - public void testFetchingInformationFromAnIncompatibleMasterNodeReturnsAnEmptyList() { - String indexName = "test"; - createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); - ShardId shardId = getShardIdForIndex(indexName); - - for (int i = 0; i < randomIntBetween(1, 50); i++) { - index(indexName, Integer.toString(i), Collections.singletonMap("foo", "bar")); - } - - String snapshotName = "snap"; - String repositoryName = "repo"; - createRepository(repositoryName, "fs", randomRepoPath(), true); - createSnapshot(repositoryName, snapshotName, indexName); - - RepositoriesService repositoriesService = internalCluster().getAnyMasterNodeInstance(RepositoriesService.class); - ThreadPool threadPool = internalCluster().getAnyMasterNodeInstance(ThreadPool.class); - ClusterService clusterService = internalCluster().getAnyMasterNodeInstance(ClusterService.class); - ShardSnapshotsService shardSnapshotsService = new ShardSnapshotsService(client(), repositoriesService, threadPool, clusterService) { - @Override - protected boolean masterSupportsFetchingLatestSnapshots() { - return false; - } - }; - - PlainActionFuture<Optional<ShardSnapshot>> latestSnapshots = PlainActionFuture.newFuture(); - shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, latestSnapshots); - assertThat(latestSnapshots.actionGet().isPresent(), is(equalTo(false))); - } - private Optional<ShardSnapshot> getLatestShardSnapshot(ShardId shardId) throws Exception { ShardSnapshotsService shardSnapshotsService = getShardSnapshotsService(); - PlainActionFuture<Optional<ShardSnapshot>> future = PlainActionFuture.newFuture(); + PlainActionFuture<Optional<ShardSnapshot>> future = new PlainActionFuture<>(); shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, future); return future.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java index b3e0d258cb113..56fffa682a36d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java @@ -40,8 +40,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { ClusterHealthResponse clusterHealth =
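The PlainActionFuture.newFuture() to new PlainActionFuture<>() change above just replaces the removed static factory with the plain constructor. A sketch, assuming the Optional<ShardSnapshot> element type used by getLatestShardSnapshot and a test method that declares throws Exception:

    // The constructor replaces the removed PlainActionFuture.newFuture() factory.
    PlainActionFuture<Optional<ShardSnapshot>> future = new PlainActionFuture<>();
    shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, future);
    Optional<ShardSnapshot> latest = future.get(); // blocks; may throw on failure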
clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); NumShards numShards = getNumShards("test"); @@ -53,8 +52,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.totalNumShards)); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource(jsonBuilder().startObject().field("value", "test" + i).endObject()) .get(); } @@ -73,8 +71,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -107,8 +104,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { .setWaitForGreenStatus() .setWaitForNoRelocatingShards(true) .setWaitForNodes(">=3") - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -130,8 +126,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { .setWaitForGreenStatus() .setWaitForNoRelocatingShards(true) .setWaitForNodes(">=3") - .execute() - .actionGet(); + .get(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -165,8 +160,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -181,8 +175,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -202,8 +195,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) .setWaitForNodes(">=3") - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -229,8 +221,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) .setWaitForNodes(">=2") - .execute() - .actionGet(); + .get(); logger.info("--> done 
cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -256,8 +247,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { .setWaitForGreenStatus() .setWaitForNodes(">=1") .setWaitForActiveShards(numShards.numPrimaries) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -286,8 +276,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -302,8 +291,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -321,8 +309,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -348,8 +335,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForGreenStatus() .setWaitForNodes(">=2") .setWaitForActiveShards(numShards.numPrimaries * 2) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -375,8 +361,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { .setWaitForYellowStatus() .setWaitForNodes(">=1") .setWaitForActiveShards(numShards.numPrimaries) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -396,8 +381,7 @@ public void testAutoExpandNumberReplicas2() { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -418,8 +402,7 @@ public void testAutoExpandNumberReplicas2() { .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 4) - .execute() - .actionGet(); + .get(); logger.info("--> done cluster health, status {}", 
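All of the replica-count tests above share one waiting idiom: ask cluster health to block until the expected number of active shard copies exists, computed as primaries times copies per primary (numShards comes from getNumShards("test") in this file). Condensed from the hunks above:

    ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .setWaitForActiveShards(numShards.numPrimaries * 2) // one primary plus one replica each
        .get();
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));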
clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -444,8 +427,7 @@ public void testUpdateWithInvalidNumberOfReplicas() { try { indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, value)) - .execute() - .actionGet(); + .get(); fail("should have thrown an exception about the replica shard count"); } catch (IllegalArgumentException e) { assertEquals("Failed to parse value [" + value + "] for setting [index.number_of_replicas] must be >= 0", e.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index e770127bf577c..563e6e0761cb1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -62,13 +62,10 @@ public void testInvalidDynamicUpdate() { createIndex("test"); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.dummy", "boom")) - .execute() - .actionGet() + () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.dummy", "boom")).get() ); assertEquals(exception.getCause().getMessage(), "this setting goes boom"); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertNotEquals(indexMetadata.getSettings().get("index.dummy"), "invalid dynamic value"); } @@ -209,51 +206,35 @@ public void testUpdateDependentIndexSettings() { iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.acc.test.pw", "asdf")) - .execute() - .actionGet() + () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.pw", "asdf")).get() ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); // user has no dependency - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.acc.test.user", "asdf")) - .execute() - .actionGet(); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.user", "asdf")).get(); // now we are consistent - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.acc.test.pw", "test")) - .execute() - .actionGet(); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.pw", "test")).get(); // now try to remove it and make sure it fails iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().putNull("index.acc.test.user")) - .execute() - .actionGet() + () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.acc.test.user")).get() ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); // now we are consistent indicesAdmin().prepareUpdateSettings("test") 
.setSettings(Settings.builder().putNull("index.acc.test.pw").putNull("index.acc.test.user")) - .execute() - .actionGet(); + .get(); } } public void testResetDefaultWithWildcard() { createIndex("test"); - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.refresh_interval", -1)) - .execute() - .actionGet(); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", -1)).get(); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertEquals(indexMetadata.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -261,8 +242,8 @@ public void testResetDefaultWithWildcard() { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1); } } - indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.ref*")).execute().actionGet(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.ref*")).get(); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertNull(indexMetadata.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -281,9 +262,8 @@ public void testResetDefault() { .put("index.translog.flush_threshold_size", "1024b") .put("index.translog.generation_threshold_size", "4096b") ) - .execute() - .actionGet(); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + .get(); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertEquals(indexMetadata.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -293,11 +273,8 @@ public void testResetDefault() { assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096); } } - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().putNull("index.refresh_interval")) - .execute() - .actionGet(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.refresh_interval")).get(); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertNull(indexMetadata.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -319,8 +296,7 @@ public void testOpenCloseUpdateSettings() throws Exception { .put("index.refresh_interval", -1) // this one can change .put("index.fielddata.cache", "none") ) // this one can't - .execute() - .actionGet() + .get() ); expectThrows( IllegalArgumentException.class, @@ -330,10 +306,9 @@ 
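The settings tests above exercise the reset path: putNull removes an override (wildcard patterns are accepted) and the index falls back to the default value. Condensed from this file's hunks:

    indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", -1)).get();
    // Reset every setting matching the pattern back to its default.
    indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.ref*")).get();
    IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test");
    assertNull(indexMetadata.getSettings().get("index.refresh_interval"));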
public void testOpenCloseUpdateSettings() throws Exception { .put("index.refresh_interval", -1) // this one can change .put("index.final", "no") ) // this one can't - .execute() - .actionGet() + .get() ); - IndexMetadata indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), nullValue()); assertThat(indexMetadata.getSettings().get("index.fielddata.cache"), nullValue()); assertThat(indexMetadata.getSettings().get("index.final"), nullValue()); @@ -346,10 +321,9 @@ public void testOpenCloseUpdateSettings() throws Exception { indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put("index.refresh_interval", -1)) // this one can change - .execute() - .actionGet(); + .get(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("-1")); // Now verify via dedicated get settings api: getSettingsResponse = indicesAdmin().prepareGetSettings("test").get(); @@ -362,18 +336,14 @@ public void testOpenCloseUpdateSettings() throws Exception { .setTimeout("30s") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(health.isTimedOut(), equalTo(false)); - indicesAdmin().prepareClose("test").execute().actionGet(); + indicesAdmin().prepareClose("test").get(); - indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) - .execute() - .actionGet(); + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getNumberOfReplicas(), equalTo(1)); indicesAdmin().prepareUpdateSettings("test") @@ -382,10 +352,9 @@ public void testOpenCloseUpdateSettings() throws Exception { .put("index.refresh_interval", "1s") // this one can change .put("index.fielddata.cache", "none") ) // this one can't - .execute() - .actionGet(); + .get(); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("1s")); assertThat(indexMetadata.getSettings().get("index.fielddata.cache"), equalTo("none")); @@ -397,11 +366,10 @@ public void testOpenCloseUpdateSettings() throws Exception { .put("index.refresh_interval", -1) // this one can change .put("index.final", "no") ) // this one really can't - .execute() - .actionGet() + .get() ); assertThat(ex.getMessage(), containsString("final test setting [index.final], not updateable")); - indexMetadata = clusterAdmin().prepareState().execute().actionGet().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("1s")); 
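Worth noting in the hunk above: a batch that mixes a valid dynamic update with a final setting fails as a whole, leaving the previously set dynamic value untouched (index.final is a test-only setting registered by this suite, per the error message). Condensed:

    IllegalArgumentException ex = expectThrows(
        IllegalArgumentException.class,
        () -> indicesAdmin().prepareUpdateSettings("test")
            .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.final", "no"))
            .get()
    );
    assertThat(ex.getMessage(), containsString("final test setting [index.final], not updateable"));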
assertThat(indexMetadata.getSettings().get("index.final"), nullValue()); @@ -413,11 +381,11 @@ public void testOpenCloseUpdateSettings() throws Exception { public void testEngineGCDeletesSetting() throws Exception { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("f", 1).setVersionType(VersionType.EXTERNAL).setVersion(1).get(); + prepareIndex("test").setId("1").setSource("f", 1).setVersionType(VersionType.EXTERNAL).setVersion(1).get(); client().prepareDelete("test", "1").setVersionType(VersionType.EXTERNAL).setVersion(2).get(); // delete is still in cache this should fail assertRequestBuilderThrows( - client().prepareIndex("test").setId("1").setSource("f", 3).setVersionType(VersionType.EXTERNAL).setVersion(1), + prepareIndex("test").setId("1").setSource("f", 3).setVersionType(VersionType.EXTERNAL).setVersion(1), VersionConflictEngineException.class ); @@ -432,7 +400,7 @@ public void testEngineGCDeletesSetting() throws Exception { } // delete should not be in cache - client().prepareIndex("test").setId("1").setSource("f", 2).setVersionType(VersionType.EXTERNAL).setVersion(1); + prepareIndex("test").setId("1").setSource("f", 2).setVersionType(VersionType.EXTERNAL).setVersion(1); } public void testUpdateSettingsWithBlocks() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index 433ddab21c34d..2b07f36551279 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -47,7 +47,7 @@ public void testCloseAllRequiresName() { } private void assertIndexIsClosed(String... 
indices) { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().execute().actionGet(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); for (String index : indices) { IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().indices().get(index); assertNotNull(indexMetadata); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index 91425067bd817..2ef7dc560b768 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -123,9 +123,7 @@ public void testCloseIndex() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) - .collect(toList()) + IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); assertBusy(() -> closeIndices(indexName)); @@ -145,7 +143,7 @@ public void testCloseAlreadyClosedIndex() throws Exception { false, randomBoolean(), IntStream.range(0, randomIntBetween(1, 10)) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) + .mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) .collect(toList()) ); } @@ -187,9 +185,7 @@ public void testConcurrentClose() throws InterruptedException { randomBoolean(), false, randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) - .collect(toList()) + IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(indexName) @@ -258,7 +254,7 @@ public void testCloseWhileDeletingIndices() throws Exception { false, randomBoolean(), IntStream.range(0, 10) - .mapToObj(n -> client().prepareIndex(indexName).setId(String.valueOf(n)).setSource("num", n)) + .mapToObj(n -> prepareIndex(indexName).setId(String.valueOf(n)).setSource("num", n)) .collect(toList()) ); } @@ -361,9 +357,7 @@ public void testCloseIndexWaitForActiveShards() throws Exception { randomBoolean(), false, randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) - .collect(toList()) + IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); ensureGreen(indexName); @@ -388,9 +382,7 @@ public void testNoopPeerRecoveriesWhenIndexClosed() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); @@ -425,9 +417,7 @@ public void testRecoverExistingReplica() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 
50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); indicesAdmin().prepareFlush(indexName).get(); @@ -468,9 +458,7 @@ public void testRelocatedClosedIndexIssue() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); assertAcked(indicesAdmin().prepareClose(indexName)); // move single shard to second node @@ -489,9 +477,7 @@ public void testResyncPropagatePrimaryTerm() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); assertAcked(indicesAdmin().prepareClose(indexName)); @@ -520,9 +506,7 @@ public void testSearcherId() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - IntStream.range(0, randomIntBetween(0, 50)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)) - .collect(toList()) + IntStream.range(0, randomIntBetween(0, 50)).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).collect(toList()) ); ensureGreen(indexName); assertAcked(indicesAdmin().prepareClose(indexName)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java index 53d3e62109536..77d38410d1ea9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -94,7 +94,7 @@ public void testCloseWhileRelocatingShards() throws Exception { createIndex(indexName); indexRandom( randomBoolean(), - IntStream.range(0, nbDocs).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).toList() + IntStream.range(0, nbDocs).mapToObj(n -> prepareIndex(indexName).setSource("num", n)).toList() ); } default -> { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 1ce0c0985b704..021515eb4cbcc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -39,8 +38,7 @@ import static org.elasticsearch.indices.state.CloseIndexIT.assertIndexIsOpened; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -50,47 +48,43 @@ public class OpenCloseIndexIT extends ESIntegTestCase { public void testSimpleCloseOpen() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1"); } public void testSimpleOpenMissingIndex() { - Exception e = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareOpen("test1").execute().actionGet()); + Exception e = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareOpen("test1").get()); assertThat(e.getMessage(), is("no such index [test1]")); } public void testOpenOneMissingIndex() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - Exception e = expectThrows( - IndexNotFoundException.class, - () -> client.admin().indices().prepareOpen("test1", "test2").execute().actionGet() - ); + Exception e = expectThrows(IndexNotFoundException.class, () -> client.admin().indices().prepareOpen("test1", "test2").get()); assertThat(e.getMessage(), is("no such index [test2]")); } public void testOpenOneMissingIndexIgnoreMissing() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); OpenIndexResponse openIndexResponse = client.admin() .indices() .prepareOpen("test1", "test2") .setIndicesOptions(IndicesOptions.lenientExpandOpen()) - .execute() - .actionGet(); + .get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1"); @@ -99,20 +93,20 @@ public void testOpenOneMissingIndexIgnoreMissing() { public void testCloseOpenMultipleIndices() { Client 
client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").execute().actionGet(); + AcknowledgedResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").get(); assertThat(closeIndexResponse1.isAcknowledged(), equalTo(true)); - AcknowledgedResponse closeIndexResponse2 = client.admin().indices().prepareClose("test2").execute().actionGet(); + AcknowledgedResponse closeIndexResponse2 = client.admin().indices().prepareClose("test2").get(); assertThat(closeIndexResponse2.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2"); assertIndexIsOpened("test3"); - OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet(); + OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").get(); assertThat(openIndexResponse1.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse1.isShardsAcknowledged(), equalTo(true)); - OpenIndexResponse openIndexResponse2 = client.admin().indices().prepareOpen("test2").execute().actionGet(); + OpenIndexResponse openIndexResponse2 = client.admin().indices().prepareOpen("test2").get(); assertThat(openIndexResponse2.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse2.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2", "test3"); @@ -121,15 +115,15 @@ public void testCloseOpenMultipleIndices() { public void testCloseOpenWildcard() { Client client = client(); createIndex("test1", "test2", "a"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test*").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test*").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2"); assertIndexIsOpened("a"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test*").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test*").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2", "a"); @@ -138,14 +132,14 @@ public void testCloseOpenWildcard() { public void testCloseOpenAll() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("_all").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = 
client.admin().indices().prepareClose("_all").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2", "test3"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("_all").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("_all").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2", "test3"); @@ -154,40 +148,37 @@ public void testCloseOpenAll() { public void testCloseOpenAllWildcard() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("*").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("*").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2", "test3"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("*").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("*").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2", "test3"); } public void testOpenNoIndex() { - Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen().execute().actionGet()); + Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen().get()); assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenNullIndex() { - Exception e = expectThrows( - ActionRequestValidationException.class, - () -> indicesAdmin().prepareOpen((String[]) null).execute().actionGet() - ); + Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen((String[]) null).get()); assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenAlreadyOpenedIndex() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); // no problem if we try to open an index that's already in open state - OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet(); + OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").get(); assertThat(openIndexResponse1.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse1.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1"); @@ -196,22 +187,17 @@ public void testOpenAlreadyOpenedIndex() { public void testSimpleCloseOpenAlias() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse 
healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse aliasesResponse = client.admin() - .indices() - .prepareAliases() - .addAlias("test1", "test1-alias") - .execute() - .actionGet(); + AcknowledgedResponse aliasesResponse = client.admin().indices().prepareAliases().addAlias("test1", "test1-alias").get(); assertThat(aliasesResponse.isAcknowledged(), equalTo(true)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1-alias").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1-alias").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1-alias").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1-alias").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1"); @@ -220,29 +206,19 @@ public void testSimpleCloseOpenAlias() { public void testCloseOpenAliasMultipleIndices() { Client client = client(); createIndex("test1", "test2"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - AcknowledgedResponse aliasesResponse1 = client.admin() - .indices() - .prepareAliases() - .addAlias("test1", "test-alias") - .execute() - .actionGet(); + AcknowledgedResponse aliasesResponse1 = client.admin().indices().prepareAliases().addAlias("test1", "test-alias").get(); assertThat(aliasesResponse1.isAcknowledged(), equalTo(true)); - AcknowledgedResponse aliasesResponse2 = client.admin() - .indices() - .prepareAliases() - .addAlias("test2", "test-alias") - .execute() - .actionGet(); + AcknowledgedResponse aliasesResponse2 = client.admin().indices().prepareAliases().addAlias("test2", "test-alias").get(); assertThat(aliasesResponse2.isAcknowledged(), equalTo(true)); - AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test-alias").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test-alias").get(); assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); assertIndexIsClosed("test1", "test2"); - OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test-alias").execute().actionGet(); + OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test-alias").get(); assertThat(openIndexResponse.isAcknowledged(), equalTo(true)); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test1", "test2"); @@ -285,7 +261,7 @@ public void testOpenCloseWithDocs() throws IOException, ExecutionException, Inte int docs = between(10, 100); IndexRequestBuilder[] builder = new IndexRequestBuilder[docs]; for (int i = 0; i < docs; i++) { - builder[i] = client().prepareIndex("test").setId("" + i).setSource("test", "init"); + builder[i] = prepareIndex("test").setId("" + i).setSource("test", "init"); } indexRandom(true, builder); if (randomBoolean()) { @@ -296,9 +272,7 @@ public void testOpenCloseWithDocs() throws IOException, 
ExecutionException, Inte // check the index still contains the records that we indexed indicesAdmin().prepareOpen("test").execute().get(); ensureGreen(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("test", "init")).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, docs); + assertHitCountAndNoFailures(prepareSearch().setQuery(QueryBuilders.matchQuery("test", "init")), docs); } public void testOpenCloseIndexWithBlocks() { @@ -307,7 +281,7 @@ public void testOpenCloseIndexWithBlocks() { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex("test").setId("" + i).setSource("test", "init").execute().actionGet(); + prepareIndex("test").setId("" + i).setSource("test", "init").get(); } for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { @@ -315,12 +289,12 @@ public void testOpenCloseIndexWithBlocks() { enableIndexBlock("test", blockSetting); // Closing an index is not blocked - AcknowledgedResponse closeIndexResponse = indicesAdmin().prepareClose("test").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = indicesAdmin().prepareClose("test").get(); assertAcked(closeIndexResponse); assertIndexIsClosed("test"); // Opening an index is not blocked - OpenIndexResponse openIndexResponse = indicesAdmin().prepareOpen("test").execute().actionGet(); + OpenIndexResponse openIndexResponse = indicesAdmin().prepareOpen("test").get(); assertAcked(openIndexResponse); assertThat(openIndexResponse.isShardsAcknowledged(), equalTo(true)); assertIndexIsOpened("test"); @@ -340,7 +314,7 @@ public void testOpenCloseIndexWithBlocks() { } } - AcknowledgedResponse closeIndexResponse = indicesAdmin().prepareClose("test").execute().actionGet(); + AcknowledgedResponse closeIndexResponse = indicesAdmin().prepareClose("test").get(); assertAcked(closeIndexResponse); assertIndexIsClosed("test"); @@ -363,7 +337,7 @@ public void testTranslogStats() throws Exception { final int nbDocs = randomIntBetween(0, 50); int uncommittedOps = 0; for (long i = 0; i < nbDocs; i++) { - final DocWriteResponse indexResponse = client().prepareIndex(indexName).setId(Long.toString(i)).setSource("field", i).get(); + final DocWriteResponse indexResponse = prepareIndex(indexName).setId(Long.toString(i)).setSource("field", i).get(); assertThat(indexResponse.status(), is(RestStatus.CREATED)); if (rarely()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index dd22f50ab420b..b5448498f0ce9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -43,7 +43,7 @@ public void testSimpleOpenClose() { ); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); @@ -54,7 +54,7 @@ public void testSimpleOpenClose() { logger.info("--> trying to index into a closed index ..."); try { - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); fail(); } catch (IndexClosedException e) { // all is well @@ -76,7 +76,7 
@@ public void testSimpleOpenClose() { ); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); } public void testFastCloseAfterCreateContinuesCreateAfterOpen() { @@ -111,7 +111,7 @@ public void testFastCloseAfterCreateContinuesCreateAfterOpen() { ); logger.info("--> indexing a simple document"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); } public void testConsistencyAfterIndexCreationFailure() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a5a9ca2862a0e..ec62a1cbbd9bf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -133,11 +133,11 @@ public void testFieldDataStats() { .setMapping("field", "type=text,fielddata=true", "field2", "type=text,fielddata=true") ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet(); - client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet(); - indicesAdmin().prepareRefresh().execute().actionGet(); + prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").get(); + prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").get(); + indicesAdmin().prepareRefresh().get(); - NodesStatsResponse nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + NodesStatsResponse nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -146,14 +146,14 @@ public void testFieldDataStats() { .getMemorySizeInBytes(), equalTo(0L) ); - IndicesStatsResponse indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).execute().actionGet(); + IndicesStatsResponse indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L)); // sort to load it to field data... - prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet(); - prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet(); + prepareSearch().addSort("field", SortOrder.ASC).get(); + prepareSearch().addSort("field", SortOrder.ASC).get(); - nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -162,18 +162,17 @@ public void testFieldDataStats() { .getMemorySizeInBytes(), greaterThan(0L) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L)); // sort to load it to field data... 
- prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet(); - prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet(); + prepareSearch().addSort("field2", SortOrder.ASC).get(); + prepareSearch().addSort("field2", SortOrder.ASC).get(); // now check the per field stats nodesStats = clusterAdmin().prepareNodesStats("data:true") .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*")) - .execute() - .actionGet(); + .get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -207,7 +206,7 @@ public void testFieldDataStats() { ) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setFieldDataFields("*").execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setFieldDataFields("*").get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L)); assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), greaterThan(0L)); assertThat( @@ -215,8 +214,8 @@ public void testFieldDataStats() { lessThan(indicesStats.getTotal().getFieldData().getMemorySizeInBytes()) ); - indicesAdmin().prepareClearCache().setFieldDataCache(true).execute().actionGet(); - nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + indicesAdmin().prepareClearCache().setFieldDataCache(true).get(); + nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -225,7 +224,7 @@ public void testFieldDataStats() { .getMemorySizeInBytes(), equalTo(0L) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L)); } @@ -237,12 +236,12 @@ public void testClearAllCaches() throws Exception { .setMapping("field", "type=text,fielddata=true") ); ensureGreen(); - clusterAdmin().prepareHealth().setWaitForGreenStatus().execute().actionGet(); - client().prepareIndex("test").setId("1").setSource("field", "value1").execute().actionGet(); - client().prepareIndex("test").setId("2").setSource("field", "value2").execute().actionGet(); - indicesAdmin().prepareRefresh().execute().actionGet(); + clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + prepareIndex("test").setId("1").setSource("field", "value1").get(); + prepareIndex("test").setId("2").setSource("field", "value2").get(); + indicesAdmin().prepareRefresh().get(); - NodesStatsResponse nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + NodesStatsResponse nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -260,20 +259,15 @@ public void testClearAllCaches() throws Exception { equalTo(0L) ); - IndicesStatsResponse indicesStats = indicesAdmin().prepareStats("test") - .clear() - .setFieldData(true) - .setQueryCache(true) - .execute() - .actionGet(); + IndicesStatsResponse indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).get(); 
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L)); assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0L)); // sort to load it to field data and filter to load filter cache - prepareSearch().setPostFilter(QueryBuilders.termQuery("field", "value1")).addSort("field", SortOrder.ASC).execute().actionGet(); - prepareSearch().setPostFilter(QueryBuilders.termQuery("field", "value2")).addSort("field", SortOrder.ASC).execute().actionGet(); + prepareSearch().setPostFilter(QueryBuilders.termQuery("field", "value1")).addSort("field", SortOrder.ASC).get(); + prepareSearch().setPostFilter(QueryBuilders.termQuery("field", "value2")).addSort("field", SortOrder.ASC).get(); - nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -291,13 +285,13 @@ public void testClearAllCaches() throws Exception { greaterThan(0L) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L)); assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0L)); - indicesAdmin().prepareClearCache().execute().actionGet(); + indicesAdmin().prepareClearCache().get(); Thread.sleep(100); // Make sure the filter cache entries have been removed... - nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); + nodesStats = clusterAdmin().prepareNodesStats("data:true").setIndices(true).get(); assertThat( nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes() .get(1) @@ -315,7 +309,7 @@ public void testClearAllCaches() throws Exception { equalTo(0L) ); - indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).execute().actionGet(); + indicesStats = indicesAdmin().prepareStats("test").clear().setFieldData(true).setQueryCache(true).get(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L)); assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0L)); } @@ -335,8 +329,7 @@ public void testQueryCache() throws Exception { while (true) { IndexRequestBuilder[] builders = new IndexRequestBuilder[pageDocs]; for (int i = 0; i < pageDocs; ++i) { - builders[i] = client().prepareIndex("idx") - .setId(Integer.toString(counter++)) + builders[i] = prepareIndex("idx").setId(Integer.toString(counter++)) .setSource(jsonBuilder().startObject().field("common", "field").field("str_value", "s" + i).endObject()); } indexRandom(true, builders); @@ -383,8 +376,7 @@ public void testQueryCache() throws Exception { // index the data again... 
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; ++i) { - builders[i] = client().prepareIndex("idx") - .setId(Integer.toString(i)) + builders[i] = prepareIndex("idx").setId(Integer.toString(i)) .setSource(jsonBuilder().startObject().field("common", "field").field("str_value", "s" + i).endObject()); } indexRandom(true, builders); @@ -494,13 +486,13 @@ public void testNonThrottleStats() throws Exception { sb.append(termUpto++); sb.append(" some random text that keeps repeating over and over again hambone"); } - client().prepareIndex("test").setId("" + termUpto).setSource("field" + (i % 10), sb.toString()).get(); + prepareIndex("test").setId("" + termUpto).setSource("field" + (i % 10), sb.toString()).get(); } refresh(); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); // nodesStats = clusterAdmin().prepareNodesStats().setIndices(true).get(); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis(), equalTo(0L)); } @@ -530,13 +522,13 @@ public void testThrottleStats() throws Exception { sb.append(' '); sb.append(termUpto++); } - client().prepareIndex("test").setId("" + termUpto).setSource("field" + (i % 10), sb.toString()).get(); + prepareIndex("test").setId("" + termUpto).setSource("field" + (i % 10), sb.toString()).get(); if (i % 2 == 0) { refresh(); } } refresh(); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); // nodesStats = clusterAdmin().prepareNodesStats().setIndices(true).get(); done = stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis() > 0; if (System.currentTimeMillis() - start > 300 * 1000) { // Wait 5 minutes for throttling to kick in @@ -556,9 +548,9 @@ public void testSimpleStats() throws Exception { createIndex("test1", "test2"); ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); + prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").get(); + prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").get(); + prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").get(); refresh(); NumShards test1 = getNumShards("test1"); @@ -567,7 +559,7 @@ public void testSimpleStats() throws Exception { long test2ExpectedWrites = test2.dataCopies; long totalExpectedWrites = test1ExpectedWrites + test2ExpectedWrites; - IndicesStatsResponse stats = indicesAdmin().prepareStats().execute().actionGet(); + IndicesStatsResponse stats = indicesAdmin().prepareStats().get(); assertThat(stats.getPrimaries().getDocs().getCount(), equalTo(3L)); assertThat(stats.getTotal().getDocs().getCount(), equalTo(totalExpectedWrites)); assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexCount(), equalTo(3L)); @@ -601,7 +593,7 @@ public void testSimpleStats() throws Exception { assertThat(stats.getIndex("test2").getTotal().getSearch().getTotal().getQueryCurrent(), equalTo(0L)); // check flags - stats = indicesAdmin().prepareStats().clear().setFlush(true).setRefresh(true).setMerge(true).execute().actionGet(); + stats = 
indicesAdmin().prepareStats().clear().setFlush(true).setRefresh(true).setMerge(true).get(); assertThat(stats.getTotal().getDocs(), nullValue()); assertThat(stats.getTotal().getStore(), nullValue()); @@ -611,19 +603,19 @@ public void testSimpleStats() throws Exception { assertThat(stats.getTotal().getRefresh(), notNullValue()); // check get - GetResponse getResponse = client().prepareGet("test2", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test2", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); assertThat(stats.getTotal().getGet().getCount(), equalTo(1L)); assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1L)); assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(0L)); // missing get - getResponse = client().prepareGet("test2", "2").execute().actionGet(); + getResponse = client().prepareGet("test2", "2").get(); assertThat(getResponse.isExists(), equalTo(false)); - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); assertThat(stats.getTotal().getGet().getCount(), equalTo(2L)); assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1L)); assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(1L)); @@ -637,8 +629,7 @@ public void testSimpleStats() throws Exception { .setRefresh(true) .setMerge(true) .clear() // reset defaults - .execute() - .actionGet(); + .get(); assertThat(stats.getTotal().getDocs(), nullValue()); assertThat(stats.getTotal().getStore(), nullValue()); @@ -648,37 +639,31 @@ public void testSimpleStats() throws Exception { // index failed try { - client().prepareIndex("test1") - .setId(Integer.toString(1)) + prepareIndex("test1").setId(Integer.toString(1)) .setSource("field", "value") .setVersion(1) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} try { - client().prepareIndex("test2") - .setId(Integer.toString(1)) + prepareIndex("test2").setId(Integer.toString(1)) .setSource("field", "value") .setVersion(1) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} try { - client().prepareIndex("test2") - .setId(Integer.toString(1)) + prepareIndex("test2").setId(Integer.toString(1)) .setSource("field", "value") .setVersion(1) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} - stats = indicesAdmin().prepareStats().execute().actionGet(); + stats = indicesAdmin().prepareStats().get(); assertThat(stats.getIndex("test2").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(2L)); assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(3L)); } @@ -697,8 +682,7 @@ public void testMergeStats() { .setRefresh(true) .setMerge(true) .clear() // reset defaults - .execute() - .actionGet(); + .get(); assertThat(stats.getTotal().getDocs(), nullValue()); assertThat(stats.getTotal().getStore(), nullValue()); @@ -707,11 +691,11 @@ public void testMergeStats() { assertThat(stats.getTotal().getSearch(), nullValue()); for (int i = 0; i < 20; i++) { - client().prepareIndex("test_index").setId(Integer.toString(i)).setSource("field", "value").execute().actionGet(); - 
indicesAdmin().prepareFlush().execute().actionGet(); + prepareIndex("test_index").setId(Integer.toString(i)).setSource("field", "value").get(); + indicesAdmin().prepareFlush().get(); } - indicesAdmin().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); - stats = indicesAdmin().prepareStats().setMerge(true).execute().actionGet(); + indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get(); + stats = indicesAdmin().prepareStats().setMerge(true).get(); assertThat(stats.getTotal().getMerge(), notNullValue()); assertThat(stats.getTotal().getMerge().getTotal(), greaterThan(0L)); @@ -738,7 +722,7 @@ public void testSegmentsStats() { assertThat(stats.getTotal().getSegments().getVersionMapMemoryInBytes(), greaterThan(0L)); indicesAdmin().prepareFlush().get(); - indicesAdmin().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); + indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get(); indicesAdmin().prepareRefresh().get(); final boolean includeSegmentFileSizes = randomBoolean(); @@ -765,18 +749,18 @@ public void testAllFlags() throws Exception { ensureGreen(); - client().prepareIndex("test_index").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test_index").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test_index_2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); + prepareIndex("test_index").setId(Integer.toString(1)).setSource("field", "value").get(); + prepareIndex("test_index").setId(Integer.toString(2)).setSource("field", "value").get(); + prepareIndex("test_index_2").setId(Integer.toString(1)).setSource("field", "value").get(); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); IndicesStatsRequestBuilder builder = indicesAdmin().prepareStats(); Flag[] values = CommonStatsFlags.SHARD_LEVEL.getFlags(); for (Flag flag : values) { set(flag, builder, false); } - IndicesStatsResponse stats = builder.execute().actionGet(); + IndicesStatsResponse stats = builder.get(); for (Flag flag : values) { assertThat(isSet(flag, stats.getPrimaries()), equalTo(false)); assertThat(isSet(flag, stats.getTotal()), equalTo(false)); @@ -785,7 +769,7 @@ public void testAllFlags() throws Exception { for (Flag flag : values) { set(flag, builder, true); } - stats = builder.execute().actionGet(); + stats = builder.get(); for (Flag flag : values) { assertThat(isSet(flag, stats.getPrimaries()), equalTo(true)); assertThat(isSet(flag, stats.getTotal()), equalTo(true)); @@ -805,7 +789,7 @@ public void testAllFlags() throws Exception { for (Flag flag : flags) { // set the flags set(flag, builder, true); } - stats = builder.execute().actionGet(); + stats = builder.get(); for (Flag flag : flags) { // check the flags assertThat(isSet(flag, stats.getPrimaries()), equalTo(true)); assertThat(isSet(flag, stats.getTotal()), equalTo(true)); @@ -896,35 +880,35 @@ public void testMultiIndex() throws Exception { ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); + prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").get(); + prepareIndex("test1").setId(Integer.toString(2)).setSource("field", 
"value").get(); + prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").get(); refresh(); int numShards1 = getNumShards("test1").totalNumShards; int numShards2 = getNumShards("test2").totalNumShards; IndicesStatsRequestBuilder builder = indicesAdmin().prepareStats(); - IndicesStatsResponse stats = builder.execute().actionGet(); + IndicesStatsResponse stats = builder.get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("_all").execute().actionGet(); + stats = builder.setIndices("_all").get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("_all").execute().actionGet(); + stats = builder.setIndices("_all").get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("*").execute().actionGet(); + stats = builder.setIndices("*").get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("test1").execute().actionGet(); + stats = builder.setIndices("test1").get(); assertThat(stats.getTotalShards(), equalTo(numShards1)); - stats = builder.setIndices("test1", "test2").execute().actionGet(); + stats = builder.setIndices("test1", "test2").get(); assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2)); - stats = builder.setIndices("*2").execute().actionGet(); + stats = builder.setIndices("*2").get(); assertThat(stats.getTotalShards(), equalTo(numShards2)); } @@ -953,37 +937,37 @@ public void testCompletionFieldsParam() throws Exception { }""")); ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource(""" + prepareIndex("test1").setId(Integer.toString(1)).setSource(""" {"bar":"bar","baz":"baz"}""", XContentType.JSON).get(); refresh(); IndicesStatsRequestBuilder builder = indicesAdmin().prepareStats(); - IndicesStatsResponse stats = builder.execute().actionGet(); + IndicesStatsResponse stats = builder.get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields(), is(nullValue())); - stats = builder.setCompletionFields("bar.completion").execute().actionGet(); + stats = builder.setCompletionFields("bar.completion").get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(false)); - stats = builder.setCompletionFields("bar.completion", "baz.completion").execute().actionGet(); + stats = builder.setCompletionFields("bar.completion", "baz.completion").get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("baz.completion"), greaterThan(0L)); - stats = builder.setCompletionFields("*").execute().actionGet(); + stats = builder.setCompletionFields("*").get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); 
assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("baz.completion"), greaterThan(0L)); - stats = builder.setCompletionFields("*r*").execute().actionGet(); + stats = builder.setCompletionFields("*r*").get(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); @@ -996,30 +980,30 @@ public void testGroupsParam() throws Exception { ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource("foo", "bar").execute().actionGet(); + prepareIndex("test1").setId(Integer.toString(1)).setSource("foo", "bar").get(); refresh(); - prepareSearch("_all").setStats("bar", "baz").execute().actionGet(); + prepareSearch("_all").setStats("bar", "baz").get(); IndicesStatsRequestBuilder builder = indicesAdmin().prepareStats(); - IndicesStatsResponse stats = builder.execute().actionGet(); + IndicesStatsResponse stats = builder.get(); assertThat(stats.getTotal().search.getTotal().getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats(), is(nullValue())); - stats = builder.setGroups("bar").execute().actionGet(); + stats = builder.setGroups("bar").get(); assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats().containsKey("baz"), is(false)); - stats = builder.setGroups("bar", "baz").execute().actionGet(); + stats = builder.setGroups("bar", "baz").get(); assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats().get("baz").getQueryCount(), greaterThan(0L)); - stats = builder.setGroups("*").execute().actionGet(); + stats = builder.setGroups("*").get(); assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats().get("baz").getQueryCount(), greaterThan(0L)); - stats = builder.setGroups("*r").execute().actionGet(); + stats = builder.setGroups("*r").get(); assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0L)); assertThat(stats.getTotal().search.getGroupStats().containsKey("baz"), is(false)); @@ -1135,8 +1119,8 @@ public void testFilterCacheStats() throws Exception { indexRandom( false, true, - client().prepareIndex("index").setId("1").setSource("foo", "bar"), - client().prepareIndex("index").setId("2").setSource("foo", "baz") + prepareIndex("index").setId("1").setSource("foo", "bar"), + prepareIndex("index").setId("2").setSource("foo", "baz") ); persistGlobalCheckpoint("index"); // Need to persist the global checkpoint for the soft-deletes retention MP. 
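For context on the indexRandom call in the hunk just above: as I understand the test framework, the two leading booleans control whether a refresh is forced once indexing finishes and whether throwaway "dummy" documents are interleaved with the real ones to surface iteration and deleted-document bugs. An annotated reading of that call, where the parameter names are my gloss inferred from usage rather than documented fact:

// Gloss of the indexRandom invocation above; parameter meanings are an assumption.
indexRandom(
    false, // forceRefresh: leave refresh timing to the test itself
    true,  // dummyDocuments: interleave throwaway docs to shake out off-by-one bugs
    prepareIndex("index").setId("1").setSource("foo", "bar"),
    prepareIndex("index").setId("2").setSource("foo", "baz")
);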
refresh(); @@ -1201,8 +1185,8 @@ public void testFilterCacheStats() throws Exception { indexRandom( true, - client().prepareIndex("index").setId("1").setSource("foo", "bar"), - client().prepareIndex("index").setId("2").setSource("foo", "baz") + prepareIndex("index").setId("1").setSource("foo", "bar"), + prepareIndex("index").setId("2").setSource("foo", "baz") ); assertBusy(() -> { @@ -1299,7 +1283,7 @@ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti } while (stop.get() == false) { final String id = Integer.toString(idGenerator.incrementAndGet()); - final DocWriteResponse response = client().prepareIndex("test").setId(id).setSource("{}", XContentType.JSON).get(); + final DocWriteResponse response = prepareIndex("test").setId(id).setSource("{}", XContentType.JSON).get(); assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); } }); @@ -1371,8 +1355,7 @@ public void testWriteLoadIsCaptured() throws Exception { final List<ActionFuture<DocWriteResponse>> indexRequestFutures = new ArrayList<>(numDocs); for (int i = 0; i < numDocs; i++) { indexRequestFutures.add( - client().prepareIndex(indexName) - .setId(Integer.toString(idGenerator.incrementAndGet())) + prepareIndex(indexName).setId(Integer.toString(idGenerator.incrementAndGet())) .setSource("{}", XContentType.JSON) .execute() ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java index 29c38c07fcbd7..0e385768fc256 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java @@ -33,9 +33,9 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { }"""), null), 3L, Collections.singletonMap("eggplant", "potato")); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("my-ct").componentTemplate(ct)).get(); - ComposableIndexTemplate cit = new ComposableIndexTemplate( - Collections.singletonList("coleslaw"), - new Template(null, new CompressedXContent(""" + ComposableIndexTemplate cit = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("coleslaw")) + .template(new Template(null, new CompressedXContent(""" { "dynamic": false, "properties": { @@ -43,12 +43,12 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { "type": "keyword" } } - }"""), null), - Collections.singletonList("my-ct"), - 4L, - 5L, - Collections.singletonMap("egg", "bread") - ); + }"""), null)) + .componentTemplates(Collections.singletonList("my-ct")) + .priority(4L) + .version(5L) + .metadata(Collections.singletonMap("egg", "bread")) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit) @@ -68,9 +68,9 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { }"""), null), 3L, Collections.singletonMap("eggplant", "potato")); client().execute(PutComponentTemplateAction.INSTANCE, new PutComponentTemplateAction.Request("my-ct").componentTemplate(ct2)).get(); - ComposableIndexTemplate cit2 = new ComposableIndexTemplate( - Collections.singletonList("coleslaw"), - new Template(null, new CompressedXContent(""" + ComposableIndexTemplate cit2 = ComposableIndexTemplate.builder() + .indexPatterns(Collections.singletonList("coleslaw")) +
.template(new Template(null, new CompressedXContent(""" { "dynamic": true, "properties": { @@ -78,12 +78,12 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { "type": "integer" } } - }"""), null), - Collections.singletonList("my-ct"), - 4L, - 5L, - Collections.singletonMap("egg", "bread") - ); + }"""), null)) + .componentTemplates(Collections.singletonList("my-ct")) + .priority(4L) + .version(5L) + .metadata(Collections.singletonMap("egg", "bread")) + .build(); client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit2) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java index 22ae5d62dc297..48958e3e39b9b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java @@ -44,13 +44,12 @@ public void testIndexTemplatesWithBlocks() throws IOException { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); try { setClusterReadOnly(true); - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_blocks").execute().actionGet(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_blocks").get(); assertThat(response.getIndexTemplates(), hasSize(1)); assertBlocked( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 359b90a351b60..25cdd413aec2b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.common.ParsingException; @@ -49,6 +48,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -149,39 +149,38 @@ public void testSimpleIndexTemplateTests() throws Exception { assertThat(response.getIndexTemplates(), hasSize(2)); // index something into test_index, will match on both templates - client().prepareIndex("test_index").setId("1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test_index").setId("1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(); - SearchResponse searchResponse = prepareSearch("test_index").setQuery(termQuery("field1", "value1")) - 
.addStoredField("field1") - .addStoredField("field2") - .execute() - .actionGet(); - - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value1")); - // field2 is not stored. - assertThat(searchResponse.getHits().getAt(0).field("field2"), nullValue()); + assertResponse( + prepareSearch("test_index").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), + searchResponse -> { + assertHitCount(searchResponse, 1); + assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value1")); + // field2 is not stored. + assertThat(searchResponse.getHits().getAt(0).field("field2"), nullValue()); + } + ); - client().prepareIndex("text_index").setId("1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("text_index").setId("1").setSource("field1", "value1", "field2", "value 2").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(); // now only match on one template (template_1) - searchResponse = prepareSearch("text_index").setQuery(termQuery("field1", "value1")) - .addStoredField("field1") - .addStoredField("field2") - .execute() - .actionGet(); - if (searchResponse.getFailedShards() > 0) { - logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures())); - } - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).field("field2").getValue().toString(), equalTo("value 2")); + assertResponse( + prepareSearch("text_index").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), + searchResponse -> { + if (searchResponse.getFailedShards() > 0) { + logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures())); + } + assertHitCount(searchResponse, 1); + assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value1")); + assertThat(searchResponse.getHits().getAt(0).field("field2").getValue().toString(), equalTo("value 2")); + } + ); } public void testDeleteIndexTemplate() throws Exception { - final int existingTemplates = admin().cluster().prepareState().execute().actionGet().getState().metadata().templates().size(); + final int existingTemplates = admin().cluster().prepareState().get().getState().metadata().templates().size(); logger.info("--> put template_1 and template_2"); indicesAdmin().preparePutTemplate("template_1") .setPatterns(Collections.singletonList("te*")) @@ -203,8 +202,7 @@ public void testDeleteIndexTemplate() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); indicesAdmin().preparePutTemplate("template_2") .setPatterns(Collections.singletonList("test*")) @@ -222,13 +220,12 @@ public void testDeleteIndexTemplate() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> explicitly delete template_1"); - indicesAdmin().prepareDeleteTemplate("template_1").execute().actionGet(); + indicesAdmin().prepareDeleteTemplate("template_1").get(); - ClusterState state = admin().cluster().prepareState().execute().actionGet().getState(); + ClusterState state = admin().cluster().prepareState().get().getState(); assertThat(state.metadata().templates().size(), equalTo(1 + existingTemplates)); assertThat(state.metadata().templates().containsKey("template_2"), equalTo(true)); @@ -255,19 +252,15 
@@ public void testDeleteIndexTemplate() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> delete template*"); - indicesAdmin().prepareDeleteTemplate("template*").execute().actionGet(); - assertThat( - admin().cluster().prepareState().execute().actionGet().getState().metadata().templates().size(), - equalTo(existingTemplates) - ); + indicesAdmin().prepareDeleteTemplate("template*").get(); + assertThat(admin().cluster().prepareState().get().getState().metadata().templates().size(), equalTo(existingTemplates)); logger.info("--> delete * with no templates, make sure we don't get a failure"); - indicesAdmin().prepareDeleteTemplate("*").execute().actionGet(); - assertThat(admin().cluster().prepareState().execute().actionGet().getState().metadata().templates().size(), equalTo(0)); + indicesAdmin().prepareDeleteTemplate("*").get(); + assertThat(admin().cluster().prepareState().get().getState().metadata().templates().size(), equalTo(0)); } public void testThatGetIndexTemplatesWorks() throws Exception { @@ -293,11 +286,10 @@ public void testThatGetIndexTemplatesWorks() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> get template template_1"); - GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1").execute().actionGet(); + GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1").get(); assertThat(getTemplate1Response.getIndexTemplates(), hasSize(1)); assertThat(getTemplate1Response.getIndexTemplates().get(0), is(notNullValue())); assertThat(getTemplate1Response.getIndexTemplates().get(0).patterns(), is(Collections.singletonList("te*"))); @@ -305,7 +297,7 @@ public void testThatGetIndexTemplatesWorks() throws Exception { assertThat(getTemplate1Response.getIndexTemplates().get(0).getVersion(), is(123)); logger.info("--> get non-existing-template"); - GetIndexTemplatesResponse getTemplate2Response = indicesAdmin().prepareGetTemplates("non-existing-template").execute().actionGet(); + GetIndexTemplatesResponse getTemplate2Response = indicesAdmin().prepareGetTemplates("non-existing-template").get(); assertThat(getTemplate2Response.getIndexTemplates(), hasSize(0)); } @@ -331,8 +323,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> put template_2"); indicesAdmin().preparePutTemplate("template_2") @@ -355,8 +346,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> put template3"); indicesAdmin().preparePutTemplate("template3") @@ -379,11 +369,10 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); logger.info("--> get template template_*"); - GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_*").execute().actionGet(); + GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_*").get(); assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2)); List<String> templateNames = new ArrayList<>(); @@ -392,7 +381,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { assertThat(templateNames, containsInAnyOrder("template_1", "template_2")); logger.info("--> get all templates"); -
getTemplate1Response = indicesAdmin().prepareGetTemplates("template*").execute().actionGet(); + getTemplate1Response = indicesAdmin().prepareGetTemplates("template*").get(); assertThat(getTemplate1Response.getIndexTemplates(), hasSize(3)); templateNames = new ArrayList<>(); @@ -402,7 +391,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception { assertThat(templateNames, containsInAnyOrder("template_1", "template_2", "template3")); logger.info("--> get templates template_1 and template_2"); - getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1", "template_2").execute().actionGet(); + getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1", "template_2").get(); assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2)); templateNames = new ArrayList<>(); @@ -498,11 +487,11 @@ public void testIndexTemplateWithAliases() throws Exception { assertAcked(prepareCreate("test_index")); ensureGreen(); - client().prepareIndex("test_index").setId("1").setSource("type", "type1", "field", "A value").get(); - client().prepareIndex("test_index").setId("2").setSource("type", "type2", "field", "B value").get(); - client().prepareIndex("test_index").setId("3").setSource("type", "typeX", "field", "C value").get(); - client().prepareIndex("test_index").setId("4").setSource("type", "typeY", "field", "D value").get(); - client().prepareIndex("test_index").setId("5").setSource("type", "typeZ", "field", "E value").get(); + prepareIndex("test_index").setId("1").setSource("type", "type1", "field", "A value").get(); + prepareIndex("test_index").setId("2").setSource("type", "type2", "field", "B value").get(); + prepareIndex("test_index").setId("3").setSource("type", "typeX", "field", "C value").get(); + prepareIndex("test_index").setId("4").setSource("type", "typeY", "field", "D value").get(); + prepareIndex("test_index").setId("5").setSource("type", "typeZ", "field", "E value").get(); GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases().setIndices("test_index").get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); @@ -514,20 +503,22 @@ public void testIndexTemplateWithAliases() throws Exception { assertHitCount(prepareSearch("simple_alias"), 5L); assertHitCount(prepareSearch("templated_alias-test_index"), 5L); - SearchResponse searchResponse = prepareSearch("filtered_alias").get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("type"), equalTo("type2")); + assertResponse(prepareSearch("filtered_alias"), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("type"), equalTo("type2")); + }); // Search the complex filter alias - searchResponse = prepareSearch("complex_filtered_alias").get(); - assertHitCount(searchResponse, 3L); + assertResponse(prepareSearch("complex_filtered_alias"), response -> { + assertHitCount(response, 3L); - Set<String> types = new HashSet<>(); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - types.add(searchHit.getSourceAsMap().get("type").toString()); - } - assertThat(types.size(), equalTo(3)); - assertThat(types, containsInAnyOrder("typeX", "typeY", "typeZ")); + Set<String> types = new HashSet<>(); + for (SearchHit searchHit : response.getHits().getHits()) { + types.add(searchHit.getSourceAsMap().get("type").toString()); + } + assertThat(types.size(), equalTo(3)); + assertThat(types, containsInAnyOrder("typeX", "typeY", "typeZ")); + }); } public void
testIndexTemplateWithAliasesInSource() { @@ -552,15 +543,16 @@ public void testIndexTemplateWithAliasesInSource() { assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(1)); - client().prepareIndex("test_index").setId("1").setSource("field", "value1").get(); - client().prepareIndex("test_index").setId("2").setSource("field", "value2").get(); + prepareIndex("test_index").setId("1").setSource("field", "value1").get(); + prepareIndex("test_index").setId("2").setSource("field", "value2").get(); refresh(); assertHitCount(prepareSearch("test_index"), 2L); - SearchResponse searchResponse = prepareSearch("my_alias").get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); + assertResponse(prepareSearch("my_alias"), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); + }); } public void testIndexTemplateWithAliasesSource() { @@ -587,16 +579,17 @@ public void testIndexTemplateWithAliasesSource() { assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(3)); - client().prepareIndex("test_index").setId("1").setSource("field", "value1").get(); - client().prepareIndex("test_index").setId("2").setSource("field", "value2").get(); + prepareIndex("test_index").setId("1").setSource("field", "value1").get(); + prepareIndex("test_index").setId("2").setSource("field", "value2").get(); refresh(); assertHitCount(prepareSearch("test_index"), 2L); assertHitCount(prepareSearch("alias1"), 2L); - SearchResponse searchResponse = prepareSearch("alias2").get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); + assertResponse(prepareSearch("alias2"), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); + }); } public void testDuplicateAlias() throws Exception { @@ -723,7 +716,7 @@ public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exceptio .addAlias(new Alias("alias4").filter(termQuery("field", "value"))) .get(); - client().prepareIndex("a1").setId("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("a1").setId("test").setSource("{}", XContentType.JSON).get(); BulkResponse response = client().prepareBulk().add(new IndexRequest("a2").id("test").source("{}", XContentType.JSON)).get(); assertThat(response.hasFailures(), is(false)); assertThat(response.getItems()[0].isFailed(), equalTo(false)); @@ -739,7 +732,7 @@ public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exceptio // So the aliases defined in the index template for this index will not fail // even though the fields in the alias fields don't exist yet and indexing into // an index that doesn't exist yet will succeed - client().prepareIndex("b1").setId("test").setSource("{}", XContentType.JSON).get(); + prepareIndex("b1").setId("test").setSource("{}", XContentType.JSON).get(); response = client().prepareBulk().add(new IndexRequest("b2").id("test").source("{}", XContentType.JSON)).get(); assertThat(response.hasFailures(), is(false)); @@ -842,33 +835,31 @@ public void testMultipleTemplate() throws IOException { ) .get(); - client().prepareIndex("ax").setId("1").setSource("field1", "value1", "field2", 
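The `.execute().actionGet()` → `.get()` rewrite running through this file is behavior-preserving: on the Elasticsearch request builders, `get()` is the blocking convenience for `execute().actionGet()`. A minimal stand-alone sketch of that relationship (the `RequestBuilder` type below is a hypothetical stand-in, not the real class):

    import java.util.concurrent.CompletableFuture;

    // Toy model of the builder API: get() is sugar for execute() plus a blocking wait,
    // which is why the patch can substitute one spelling for the other everywhere.
    abstract class RequestBuilder<R> {
        abstract CompletableFuture<R> execute();   // async send, returns a future

        R get() {
            return execute().join();               // block for the result, like actionGet()
        }
    }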
"value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("ax").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("bx").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("bx").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(); // ax -> matches template - SearchResponse searchResponse = prepareSearch("ax").setQuery(termQuery("field1", "value1")) - .addStoredField("field1") - .addStoredField("field2") - .execute() - .actionGet(); - - assertHitCount(searchResponse, 1); - assertEquals("value1", searchResponse.getHits().getAt(0).field("field1").getValue().toString()); - assertNull(searchResponse.getHits().getAt(0).field("field2")); + assertResponse( + prepareSearch("ax").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), + response -> { + assertHitCount(response, 1); + assertEquals("value1", response.getHits().getAt(0).field("field1").getValue().toString()); + assertNull(response.getHits().getAt(0).field("field2")); + } + ); // bx -> matches template - searchResponse = prepareSearch("bx").setQuery(termQuery("field1", "value1")) - .addStoredField("field1") - .addStoredField("field2") - .execute() - .actionGet(); - - assertHitCount(searchResponse, 1); - assertEquals("value1", searchResponse.getHits().getAt(0).field("field1").getValue().toString()); - assertNull(searchResponse.getHits().getAt(0).field("field2")); + assertResponse( + prepareSearch("bx").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), + response -> { + assertHitCount(response, 1); + assertEquals("value1", response.getHits().getAt(0).field("field1").getValue().toString()); + assertNull(response.getHits().getAt(0).field("field2")); + } + ); } public void testPartitionedTemplate() throws Exception { @@ -995,7 +986,7 @@ public void testIndexTemplatesWithSameSubfield() { """, XContentType.JSON) .get(); - client().prepareIndex("test").setSource().get(); + prepareIndex("test").setSource().get(); FieldCapabilitiesResponse fieldCapabilitiesResponse = client().prepareFieldCaps("test").setFields("*location").get(); { Map field = fieldCapabilitiesResponse.getField("kwm.source.geo.location"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java index 04194238bd9ff..afc39cd6b4d7e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java @@ -230,15 +230,13 @@ public void test() throws Exception { assertThat(getResponse.pipelines().size(), equalTo(1)); assertThat(getResponse.pipelines().get(0).getId(), equalTo("_id")); - client().prepareIndex("test").setId("1").setPipeline("_id").setSource("field", "value", "fail", false).get(); + prepareIndex("test").setId("1").setPipeline("_id").setSource("field", "value", "fail", false).get(); Map doc = client().prepareGet("test", "1").get().getSourceAsMap(); assertThat(doc.get("field"), equalTo("value")); assertThat(doc.get("processed"), equalTo(true)); - client().prepareBulk() - .add(client().prepareIndex("test").setId("2").setSource("field", "value2", "fail", false).setPipeline("_id")) - .get(); + client().prepareBulk().add(prepareIndex("test").setId("2").setSource("field", "value2", "fail", 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/mget/SimpleMgetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/mget/SimpleMgetIT.java
index 6c6d59844d484..03e8edbf1173d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/mget/SimpleMgetIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/mget/SimpleMgetIT.java
@@ -37,8 +37,7 @@ public class SimpleMgetIT extends ESIntegTestCase {
     public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException {
         createIndex("test");

-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
             .setRefreshPolicy(IMMEDIATE)
             .get();
@@ -75,8 +74,7 @@ public void testThatMgetShouldWorkWithMultiIndexAlias() throws IOException {
         assertAcked(prepareCreate("test").addAlias(new Alias("multiIndexAlias")));
         assertAcked(prepareCreate("test2").addAlias(new Alias("multiIndexAlias")));

-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
             .setRefreshPolicy(IMMEDIATE)
             .get();
@@ -115,8 +113,7 @@ public void testThatMgetShouldWorkWithAliasRouting() throws IOException {
             )
         );

-        client().prepareIndex("alias1")
-            .setId("1")
+        prepareIndex("alias1").setId("1")
             .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
             .setRefreshPolicy(IMMEDIATE)
             .get();
@@ -142,7 +139,7 @@ public void testThatSourceFilteringIsSupported() throws Exception {
                 .endObject()
         );
         for (int i = 0; i < 100; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource(sourceBytesRef, XContentType.JSON).get();
+            prepareIndex("test").setId(Integer.toString(i)).setSource(sourceBytesRef, XContentType.JSON).get();
         }

         MultiGetRequestBuilder request = client().prepareMultiGet();
@@ -173,7 +170,7 @@ public void testThatSourceFilteringIsSupported() throws Exception {
                 assertThat(((Map) source.get("included")).size(), equalTo(1));
                 assertThat(((Map) source.get("included")), hasKey("field"));
             } else {
-                assertThat(responseItem.getResponse().getSourceAsBytes(), nullValue());
+                assertThat(responseItem.getResponse().getSourceAsBytesRef(), nullValue());
             }
         }
     }
@@ -189,8 +186,7 @@ public void testThatRoutingPerDocumentIsSupported() throws Exception {
         final String id = routingKeyForShard("test", 0);
         final String routingOtherShard = routingKeyForShard("test", 1);

-        client().prepareIndex("test")
-            .setId(id)
+        prepareIndex("test").setId(id)
             .setRefreshPolicy(IMMEDIATE)
             .setRouting(routingOtherShard)
             .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
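One change in SimpleMgetIT is not purely mechanical: the null-source assertion now goes through `getSourceAsBytesRef()` rather than `getSourceAsBytes()`. Both answer "did this get return a source?", but the `BytesReference` accessor does so without materializing a fresh `byte[]` copy, so it is the cheaper spelling for a null check (a reading of the change, not part of the patch):

    // Before: byte[] accessor, which copies the source bytes when one exists.
    assertThat(responseItem.getResponse().getSourceAsBytes(), nullValue());
    // After: BytesReference accessor, a view over the stored bytes; still null when absent.
    assertThat(responseItem.getResponse().getSourceAsBytesRef(), nullValue());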
routingKeyForShard("test", 1); - client().prepareIndex("test") - .setId(id) + prepareIndex("test").setId(id) .setRefreshPolicy(IMMEDIATE) .setRouting(routingOtherShard) .setSource(jsonBuilder().startObject().field("foo", "bar").endObject()) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index 15225edc47a60..cafc0e9426eea 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -41,7 +41,7 @@ public void testNodesInfos() throws Exception { String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().getLocalNodeId(); logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId); - NodesInfoResponse response = clusterAdmin().prepareNodesInfo().execute().actionGet(); + NodesInfoResponse response = clusterAdmin().prepareNodesInfo().get(); assertThat(response.getNodes(), hasSize(2)); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertThat(response.getNodesMap().get(server2NodeId), notNullValue()); @@ -80,7 +80,7 @@ public void testNodesInfosTotalIndexingBuffer() throws Exception { String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().getLocalNodeId(); logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId); - NodesInfoResponse response = clusterAdmin().prepareNodesInfo().execute().actionGet(); + NodesInfoResponse response = clusterAdmin().prepareNodesInfo().get(); assertThat(response.getNodes(), hasSize(2)); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertNotNull(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer()); @@ -91,7 +91,7 @@ public void testNodesInfosTotalIndexingBuffer() throws Exception { assertThat(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L)); // again, using only the indices flag - response = clusterAdmin().prepareNodesInfo().clear().setIndices(true).execute().actionGet(); + response = clusterAdmin().prepareNodesInfo().clear().setIndices(true).get(); assertThat(response.getNodes(), hasSize(2)); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertNotNull(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer()); @@ -118,7 +118,7 @@ public void testAllocatedProcessors() throws Exception { String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().getLocalNodeId(); logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId); - NodesInfoResponse response = clusterAdmin().prepareNodesInfo().execute().actionGet(); + NodesInfoResponse response = clusterAdmin().prepareNodesInfo().get(); assertThat(response.getNodes(), hasSize(2)); assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java index ab24bf923b9db..450b27eb0db8b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java @@ -99,16 +99,13 @@ protected Collection> getMockPlugins() { } private void 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java
index f4aa261b09625..1e67a38c76017 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java
@@ -49,19 +49,11 @@ public void testFullRollingRestart() throws Exception {
         final String healthTimeout = "1m";

         for (int i = 0; i < 1000; i++) {
-            client().prepareIndex("test")
-                .setId(Long.toString(i))
-                .setSource(Map.of("test", "value" + i))
-                .execute()
-                .actionGet();
+            prepareIndex("test").setId(Long.toString(i)).setSource(Map.of("test", "value" + i)).get();
         }
         flush();
         for (int i = 1000; i < 2000; i++) {
-            client().prepareIndex("test")
-                .setId(Long.toString(i))
-                .setSource(Map.of("test", "value" + i))
-                .execute()
-                .actionGet();
+            prepareIndex("test").setId(Long.toString(i)).setSource(Map.of("test", "value" + i)).get();
         }

         logger.info("--> now start adding nodes");
@@ -173,11 +165,7 @@ public void testNoRebalanceOnRollingRestart() throws Exception {
         ).get();

         for (int i = 0; i < 100; i++) {
-            client().prepareIndex("test")
-                .setId(Long.toString(i))
-                .setSource(Map.of("test", "value" + i))
-                .execute()
-                .actionGet();
+            prepareIndex("test").setId(Long.toString(i)).setSource(Map.of("test", "value" + i)).get();
         }
         ensureGreen();
         ClusterState state = clusterAdmin().prepareState().get().getState();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
index fceeb2013b7c5..d47c68690bab8 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
@@ -41,8 +41,8 @@
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;

 public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {

@@ -85,7 +85,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception {
             indexer.continueIndexing(extraDocs);
             logger.info("--> flushing the index ....");
             // now flush, just to make sure we have some data in the index, not just translog
-            indicesAdmin().prepareFlush().execute().actionGet();
+            indicesAdmin().prepareFlush().get();

             logger.info("--> waiting for {} docs to be indexed ...", waitFor);
             waitForDocs(waitFor, indexer);
@@ -144,7 +144,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr
             indexer.continueIndexing(extraDocs);
             logger.info("--> flushing the index ....");
             // now flush, just to make sure we have some data in the index, not just translog
-            indicesAdmin().prepareFlush().execute().actionGet();
+            indicesAdmin().prepareFlush().get();

             logger.info("--> waiting for {} docs to be indexed ...", waitFor);
             waitForDocs(waitFor, indexer);
@@ -200,7 +200,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception
             indexer.continueIndexing(extraDocs);
             logger.info("--> flushing the index ....");
             // now flush, just to make sure we have some data in the index, not just translog
-            indicesAdmin().prepareFlush().execute().actionGet();
+            indicesAdmin().prepareFlush().get();

             logger.info("--> waiting for {} docs to be indexed ...", waitFor);
             waitForDocs(waitFor, indexer);
@@ -313,22 +313,23 @@ public void testRecoverWhileRelocating() throws Exception {
     private void iterateAssertCount(final int numberOfShards, final int iterations, final Set<String> ids) throws Exception {
         final long numberOfDocs = ids.size();

-        SearchResponse[] iterationResults = new SearchResponse[iterations];
-        boolean error = false;
+        long[] iterationHitCount = new long[iterations];
+        boolean[] error = new boolean[1];
         for (int i = 0; i < iterations; i++) {
-            SearchResponse searchResponse = prepareSearch().setSize((int) numberOfDocs)
-                .setQuery(matchAllQuery())
-                .setTrackTotalHits(true)
-                .addSort("id", SortOrder.ASC)
-                .get();
-            logSearchResponse(numberOfShards, numberOfDocs, i, searchResponse);
-            iterationResults[i] = searchResponse;
-            if (searchResponse.getHits().getTotalHits().value != numberOfDocs) {
-                error = true;
-            }
+            final int finalI = i;
+            assertResponse(
+                prepareSearch().setSize((int) numberOfDocs).setQuery(matchAllQuery()).setTrackTotalHits(true).addSort("id", SortOrder.ASC),
+                response -> {
+                    logSearchResponse(numberOfShards, numberOfDocs, finalI, response);
+                    iterationHitCount[finalI] = response.getHits().getTotalHits().value;
+                    if (iterationHitCount[finalI] != numberOfDocs) {
+                        error[0] = true;
+                    }
+                }
+            );
         }
-        if (error) {
+        if (error[0]) {
             // Printing out shards and their doc count
             IndicesStatsResponse indicesStatsResponse = indicesAdmin().prepareStats().get();
             for (ShardStats shardStats : indicesStatsResponse.getShards()) {
@@ -364,21 +365,22 @@ private void iterateAssertCount(final int numberOfShards, final int iterations,
             // if there was an error we try to wait and see if at some point it'll get fixed
             logger.info("--> trying to wait");
             assertBusy(() -> {
-                boolean errorOccurred = false;
+                boolean[] errorOccurred = new boolean[1];
                 for (int i = 0; i < iterations; i++) {
-                    SearchResponse searchResponse = prepareSearch().setTrackTotalHits(true).setSize(0).setQuery(matchAllQuery()).get();
-                    if (searchResponse.getHits().getTotalHits().value != numberOfDocs) {
-                        errorOccurred = true;
-                    }
+                    assertResponse(prepareSearch().setTrackTotalHits(true).setSize(0).setQuery(matchAllQuery()), response -> {
+                        if (response.getHits().getTotalHits().value != numberOfDocs) {
+                            errorOccurred[0] = true;
+                        }
+                    });
                 }
-                assertFalse("An error occurred while waiting", errorOccurred);
+                assertFalse("An error occurred while waiting", errorOccurred[0]);
             }, 5, TimeUnit.MINUTES);
             assertEquals(numberOfDocs, ids.size());
         }

         // lets now make the test fail if it was supposed to fail
         for (int i = 0; i < iterations; i++) {
-            assertHitCount(iterationResults[i], numberOfDocs);
+            assertEquals(iterationHitCount[i], numberOfDocs);
         }
     }
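The `iterateAssertCount` rewrite above trades plain locals (`boolean error`, `SearchResponse[] iterationResults`) for one-element arrays. That is the standard workaround for Java's capture rule: a lambda passed to `assertResponse` may only read effectively-final locals, but it may freely mutate the contents of a final array reference. A self-contained illustration of just that idiom (toy example, not test code):

    import java.util.function.LongConsumer;

    class ArrayCaptureDemo {
        public static void main(String[] args) {
            long[] hitCount = new long[1];        // mutable box behind a final reference
            boolean[] error = new boolean[1];

            LongConsumer consumer = hits -> {     // stands in for the assertResponse callback
                hitCount[0] = hits;               // legal: mutates array contents, not the local
                if (hits != 42L) {
                    error[0] = true;              // legal for the same reason
                }
            };

            consumer.accept(41L);
            System.out.println(hitCount[0] + " / " + error[0]);  // prints "41 / true"
        }
    }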
(int i = 0; i < iterations; i++) { - assertHitCount(iterationResults[i], numberOfDocs); + assertEquals(iterationHitCount[i], numberOfDocs); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java index 9e04413bfb014..e53bcb0480d7b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; @@ -80,6 +79,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -122,42 +122,40 @@ public void testSimpleRelocationNoIndexing() { logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } logger.info("--> flush so we have an actual index"); - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); logger.info("--> index more docs so we have something in the translog"); for (int i = 10; i < 20; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } logger.info("--> verifying count"); - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat(prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); + indicesAdmin().prepareRefresh().get(); + assertThat(prepareSearch("test").setSize(0).get().getHits().getTotalHits().value, equalTo(20L)); logger.info("--> start another node"); final String node_2 = internalCluster().startNode(); ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); - clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).execute().actionGet(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get(); clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); 
logger.info("--> verifying count again..."); - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat(prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); + indicesAdmin().prepareRefresh().get(); + assertThat(prepareSearch("test").setSize(0).get().getHits().getTotalHits().value, equalTo(20L)); } public void testRelocationWhileIndexingRandom() throws Exception { @@ -187,8 +185,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i)) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } } @@ -219,8 +216,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); indexer.pauseIndexing(); logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode); @@ -231,7 +227,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - indicesAdmin().prepareRefresh("test").execute().actionGet(); + indicesAdmin().prepareRefresh("test").get(); logger.info("--> searching the index"); boolean ranOnce = false; for (int i = 0; i < 10; i++) { @@ -239,8 +235,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { SearchHits hits = prepareSearch("test").setQuery(matchAllQuery()) .setSize((int) indexer.totalIndexedDocs()) .storedFields() - .execute() - .actionGet() + .get() .getHits(); ranOnce = true; if (hits.getTotalHits().value != indexer.totalIndexedDocs()) { @@ -298,8 +293,7 @@ public void testRelocationWhileRefreshing() throws Exception { .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i + 1)) .setWaitForGreenStatus() - .execute() - .actionGet(); + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } } @@ -334,12 +328,12 @@ public void indexShardStateChanged( List builders1 = new ArrayList<>(); for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) { - builders1.add(client().prepareIndex("test").setSource("{}", XContentType.JSON)); + builders1.add(prepareIndex("test").setSource("{}", XContentType.JSON)); } List builders2 = new ArrayList<>(); for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) { - builders2.add(client().prepareIndex("test").setSource("{}", XContentType.JSON)); + builders2.add(prepareIndex("test").setSource("{}", XContentType.JSON)); } logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]); @@ -366,15 +360,15 @@ public void indexShardStateChanged( logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode); logger.debug("--> verifying all searches return the same number of docs"); - long expectedCount = -1; + long[] expectedCount = new long[] { -1 }; for (Client client : clients()) { - SearchResponse response = client.prepareSearch("test").setPreference("_local").setSize(0).get(); - assertNoFailures(response); - if (expectedCount < 0) { - expectedCount = response.getHits().getTotalHits().value; - } else { - assertEquals(expectedCount, response.getHits().getTotalHits().value); - } + assertNoFailuresAndResponse(client.prepareSearch("test").setPreference("_local").setSize(0), response -> { + if (expectedCount[0] < 0) 
{ + expectedCount[0] = response.getHits().getTotalHits().value; + } else { + assertEquals(expectedCount[0], response.getHits().getTotalHits().value); + } + }); } } @@ -394,7 +388,7 @@ public void testCancellationCleansTempFiles() throws Exception { List requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); @@ -497,7 +491,7 @@ public void testIndexSearchAndRelocateConcurrently() throws Exception { for (int i = 0; i < numDocs; i++) { String id = randomRealisticUnicodeOfLength(10) + String.valueOf(i); ids.add(id); - docs[i] = client().prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i)); + docs[i] = prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i)); } indexRandom(true, docs); assertHitCount(prepareSearch("test"), numDocs); @@ -512,7 +506,7 @@ public void testIndexSearchAndRelocateConcurrently() throws Exception { for (int i = 0; i < numDocs; i++) { String id = randomRealisticUnicodeOfLength(10) + String.valueOf(numDocs + i); ids.add(id); - docs[i] = client().prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(numDocs + i)); + docs[i] = prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(numDocs + i)); } indexRandom(true, docs); @@ -544,14 +538,13 @@ public void testRelocateWhileWaitingForRefresh() { logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } logger.info("--> flush so we have an actual index"); - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); logger.info("--> index more docs so we have something in the translog"); for (int i = 10; i < 20; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i) .execute(); @@ -562,24 +555,22 @@ public void testRelocateWhileWaitingForRefresh() { ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); - clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node1, node2)).execute().actionGet(); + clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node1, node2)).get(); clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); - indicesAdmin().prepareRefresh().execute().actionGet(); - assertThat(prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); + indicesAdmin().prepareRefresh().get(); + 
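RelocationIT follows the same pattern as the other search-heavy tests in this patch: raw `SearchResponse` locals give way to `assertResponse` / `assertNoFailuresAndResponse`, which hand the response to a callback instead of returning it. The point of the consumer style is lifecycle control: the helper, not the test body, decides when the response is released. A rough sketch of the contract (simplified; the real helpers in `ElasticsearchAssertions` work with ref-counted responses rather than `AutoCloseable`):

    import java.util.function.Consumer;
    import java.util.function.Supplier;

    final class ResponseAssertions {
        // Sketch: the helper owns the response for exactly the duration of the callback,
        // releasing it even when an assertion inside the callback throws.
        static <R extends AutoCloseable> void assertResponse(Supplier<R> execute, Consumer<R> assertions) throws Exception {
            try (R response = execute.get()) {
                assertions.accept(response);
            }
        }
    }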
assertThat(prepareSearch("test").setSize(0).get().getHits().getTotalHits().value, equalTo(20L)); } public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws Exception { @@ -595,16 +586,15 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get(); } logger.info("--> flush so we have an actual index"); - indicesAdmin().prepareFlush().execute().actionGet(); + indicesAdmin().prepareFlush().get(); logger.info("--> index more docs so we have something in the translog"); final List> pendingIndexResponses = new ArrayList<>(); for (int i = 10; i < 20; i++) { pendingIndexResponses.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i) .execute() @@ -616,8 +606,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> relocate the shard from node1 to node2"); @@ -627,8 +616,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E logger.info("--> index 100 docs while relocating"); for (int i = 20; i < 120; i++) { pendingIndexResponses.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) .setSource("field", "value" + i) .execute() @@ -639,17 +627,16 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) - .execute() - .actionGet(); + .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); assertBusy(() -> { - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); assertTrue(pendingIndexResponses.stream().allMatch(ActionFuture::isDone)); }, 1, TimeUnit.MINUTES); - assertThat(prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(120L)); + assertThat(prepareSearch("test").setSize(0).get().getHits().getTotalHits().value, equalTo(120L)); } public void testRelocationEstablishedPeerRecoveryRetentionLeases() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java index e11f443f6c5b3..bd69aebcd415e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java @@ -38,7 +38,7 @@ protected int maximumNumberOfReplicas() { } public void testSimpleRecovery() throws Exception { - assertAcked(prepareCreate("test", 1).execute().actionGet()); + assertAcked(prepareCreate("test", 1).get()); NumShards numShards = getNumShards("test"); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 6281df7fc6646..28c56e0cdc916 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -88,7 +88,7 @@ public void testCancelRecoveryAndResume() throws Exception { List builder = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); - builder.add(client().prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i), "the_id", id)); + builder.add(prepareIndex("test").setId(id).setSource("field1", English.intToEnglish(i), "the_id", id)); } indexRandom(true, builder); for (int i = 0; i < numDocs; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index be8053a1d6866..f77cc9ce20020 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -137,7 +137,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final SnapshotInfo snapshotInfo = createSnapshot(repoName, Strings.format("snap-%03d", i), snapshotIndices); if (snapshotInfo.indices().contains(indexName)) { lastSnapshot = snapshotInfo; - ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().execute().actionGet(); + ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().get(); IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(indexName); expectedIndexMetadataId = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetadata); } @@ -338,7 +338,7 @@ private PlainActionFuture getLatestSnapshotForShardFut boolean useAllRepositoriesRequest ) { ShardId shardId = new ShardId(new Index(indexName, "__na__"), shard); - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); final GetShardSnapshotRequest request; if (useAllRepositoriesRequest && randomBoolean()) { request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index 7d444eef787c0..0b1802fc71470 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -90,7 +90,7 @@ private ActionFuture startBlockedCleanup(String repoN final BlobStoreRepository repository = getRepositoryOnMaster(repoName); logger.info("--> creating a garbage data blob"); - final PlainActionFuture garbageFuture = PlainActionFuture.newFuture(); + final PlainActionFuture garbageFuture = new PlainActionFuture<>(); repository.threadPool() .generic() .execute( @@ -137,7 +137,7 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti final BlobStoreRepository repository = getRepositoryOnMaster(repoName); logger.info("--> write two 
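The snapshot and blob-store tests also pick up a small constructor cleanup: `PlainActionFuture.newFuture()` becomes `new PlainActionFuture<>()`. Nothing changes semantically; judging from the one-to-one substitution, the static factory was only ever a wrapper of this shape (assumed body, the factory predates reliable diamond inference):

    // Presumed body of the retired factory: the diamond constructor now says the same thing directly.
    public static <T> PlainActionFuture<T> newFuture() {
        return new PlainActionFuture<>();
    }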
outdated index-N blobs"); for (int i = 0; i < 2; ++i) { - final PlainActionFuture createOldIndexNFuture = PlainActionFuture.newFuture(); + final PlainActionFuture createOldIndexNFuture = new PlainActionFuture<>(); final int generation = i; repository.threadPool() .generic() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index cf47fbe95da24..58dcfdaec5147 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -100,10 +100,7 @@ public class FileSettingsServiceIT extends ESIntegTestCase { }"""; private void assertMasterNode(Client client, String node) { - assertThat( - client.admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), - equalTo(node) - ); + assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); } private void writeJSONFile(String node, String json) throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index 478cae8746f86..53001e30763a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ -37,9 +37,9 @@ public void testSearchClosedWildcardIndex() throws ExecutionException, Interrupt indicesAdmin().prepareClose("test-1").get(); indexRandom( true, - client().prepareIndex("test-0").setId("1").setSource("field1", "the quick brown fox jumps"), - client().prepareIndex("test-0").setId("2").setSource("field1", "quick brown"), - client().prepareIndex("test-0").setId("3").setSource("field1", "quick") + prepareIndex("test-0").setId("1").setSource("field1", "the quick brown fox jumps"), + prepareIndex("test-0").setId("2").setSource("field1", "quick brown"), + prepareIndex("test-0").setId("3").setSource("field1", "quick") ); refresh("test-*"); assertHitCount( @@ -51,7 +51,7 @@ public void testSearchClosedWildcardIndex() throws ExecutionException, Interrupt public void testResolveIndexRouting() { createIndex("test1"); createIndex("test2"); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); indicesAdmin().prepareAliases() .addAliasAction(AliasActions.add().index("test1").alias("alias")) @@ -93,7 +93,7 @@ public void testResolveSearchRouting() { createIndex("test1"); createIndex("test2"); createIndex("test3"); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); indicesAdmin().prepareAliases() .addAliasAction(AliasActions.add().index("test1").alias("alias")) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java index 7ee081ffd433e..442a2dc99bda3 100644 --- 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java
index 7ee081ffd433e..442a2dc99bda3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.routing;

 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
 import org.elasticsearch.client.internal.Requests;
@@ -20,6 +19,7 @@
 import org.elasticsearch.xcontent.XContentFactory;

 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;

 /**
@@ -38,61 +38,57 @@ public void testAliasCrudRouting() throws Exception {
         assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("test").alias("alias0").routing("0")));

         logger.info("--> indexing with id [1], and routing [0] using alias");
-        client().prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         logger.info("--> verifying get with no routing, should not find anything");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false));
+            assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false));
         }
         logger.info("--> verifying get with routing, should find");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+            assertThat(client().prepareGet("test", "1").setRouting("0").get().isExists(), equalTo(true));
         }
         logger.info("--> verifying get with routing alias, should find");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true));
+            assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true));
         }

         logger.info("--> updating with id [1] and routing through alias");
         client().prepareUpdate("alias0", "1")
             .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
             .setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2")
-            .execute()
-            .actionGet();
+            .get();
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true));
-            assertThat(
-                client().prepareGet("alias0", "1").execute().actionGet().getSourceAsMap().get("field").toString(),
-                equalTo("value2")
-            );
+            assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true));
+            assertThat(client().prepareGet("alias0", "1").get().getSourceAsMap().get("field").toString(), equalTo("value2"));
         }

         logger.info("--> deleting with no routing, should not delete anything");
         client().prepareDelete("test", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false));
-            assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
-            assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true));
+            assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false));
+            assertThat(client().prepareGet("test", "1").setRouting("0").get().isExists(), equalTo(true));
+            assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true));
         }

         logger.info("--> deleting with routing alias, should delete");
         client().prepareDelete("alias0", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false));
-            assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
-            assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(false));
+            assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false));
+            assertThat(client().prepareGet("test", "1").setRouting("0").get().isExists(), equalTo(false));
+            assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(false));
         }

         logger.info("--> indexing with id [1], and routing [0] using alias");
-        client().prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         logger.info("--> verifying get with no routing, should not find anything");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false));
+            assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false));
         }
         logger.info("--> verifying get with routing, should find");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
-            assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true));
+            assertThat(client().prepareGet("test", "1").setRouting("0").get().isExists(), equalTo(true));
+            assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true));
         }
     }

@@ -108,59 +104,37 @@ public void testAliasSearchRouting() throws Exception {
         );

         logger.info("--> indexing with id [1], and routing [0] using alias");
-        client().prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("alias0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         logger.info("--> verifying get with no routing, should not find anything");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false));
+            assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false));
         }
         logger.info("--> verifying get with routing, should find");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("alias0", "1").execute().actionGet().isExists(), equalTo(true));
+            assertThat(client().prepareGet("alias0", "1").get().isExists(), equalTo(true));
         }

         logger.info("--> search with no routing, should fine one");
         for (int i = 0; i < 5; i++) {
-            assertThat(
-                prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-                equalTo(1L)
-            );
+            assertThat(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L));
         }

         logger.info("--> search with wrong routing, should not find");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch().setRouting("1")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(0L)
             );
             assertThat(
-                prepareSearch().setSize(0)
-                    .setRouting("1")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(0L)
             );
-            assertThat(
-                prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-                equalTo(0L)
-            );
+            assertThat(prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L));
             assertThat(
-                prepareSearch("alias1").setSize(0)
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(0L)
             );
         }
@@ -169,50 +143,28 @@ public void testAliasSearchRouting() throws Exception {

         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch().setRouting("0")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
             assertThat(
-                prepareSearch().setSize(0)
-                    .setRouting("0")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
+            assertThat(prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L));
             assertThat(
-                prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-                equalTo(1L)
-            );
-            assertThat(
-                prepareSearch("alias0").setSize(0)
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
         }

         logger.info("--> indexing with id [2], and routing [1] using alias");
-        client().prepareIndex("alias1").setId("2").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("alias1").setId("2").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();

         logger.info("--> search with no routing, should fine two");
         for (int i = 0; i < 5; i++) {
+            assertThat(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L));
             assertThat(
-                prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-                equalTo(2L)
-            );
-            assertThat(
-                prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
+                prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
         }
@@ -220,35 +172,16 @@ public void testAliasSearchRouting() throws Exception {
         logger.info("--> search with 0 routing, should find one");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch().setRouting("0")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
-                equalTo(1L)
-            );
-            assertThat(
-                prepareSearch().setSize(0)
-                    .setRouting("0")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
             assertThat(
-                prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
+                prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
+            assertThat(prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L));
             assertThat(
-                prepareSearch("alias0").setSize(0)
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
         }
@@ -256,35 +189,16 @@ public void testAliasSearchRouting() throws Exception {
         logger.info("--> search with 1 routing, should find one");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch().setRouting("1")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
             assertThat(
-                prepareSearch().setSize(0)
-                    .setRouting("1")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
+            assertThat(prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L));
             assertThat(
-                prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-                equalTo(1L)
-            );
-            assertThat(
-                prepareSearch("alias1").setSize(0)
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(1L)
             );
         }
@@ -292,35 +206,21 @@ public void testAliasSearchRouting() throws Exception {
         logger.info("--> search with 0,1 indexRoutings , should find two");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch().setRouting("0", "1")
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
             assertThat(
                 prepareSearch().setSize(0)
                     .setRouting("0", "1")
                     .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
+                    .get()
                     .getHits()
                     .getTotalHits().value,
                 equalTo(2L)
             );
+            assertThat(prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L));
             assertThat(
-                prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-                equalTo(2L)
-            );
-            assertThat(
-                prepareSearch("alias01").setSize(0)
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
         }
@@ -328,20 +228,11 @@ public void testAliasSearchRouting() throws Exception {
         logger.info("--> search with two routing aliases , should find two");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
             assertThat(
-                prepareSearch("alias0", "alias1").setSize(0)
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
         }
@@ -349,18 +240,13 @@ public void testAliasSearchRouting() throws Exception {
         logger.info("--> search with alias0, alias1 and alias01, should find two");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
             assertThat(
                 prepareSearch("alias0", "alias1", "alias01").setSize(0)
                     .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
+                    .get()
                     .getHits()
                     .getTotalHits().value,
                 equalTo(2L)
@@ -370,18 +256,13 @@ public void testAliasSearchRouting() throws Exception {
         logger.info("--> search with test, alias0 and alias1, should find two");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
             assertThat(
                 prepareSearch("test", "alias0", "alias1").setSize(0)
                     .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
+                    .get()
                     .getHits()
                     .getTotalHits().value,
                 equalTo(2L)
@@ -412,42 +293,37 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception {
         );
         ensureGreen(); // wait for events again to make sure we got the aliases on all nodes
         logger.info("--> indexing with id [1], and routing [0] using alias to test-a");
-        client().prepareIndex("alias-a0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("alias-a0").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         logger.info("--> verifying get with no routing, should not find anything");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test-a", "1").execute().actionGet().isExists(), equalTo(false));
+            assertThat(client().prepareGet("test-a", "1").get().isExists(), equalTo(false));
         }
         logger.info("--> verifying get with routing, should find");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("alias-a0", "1").execute().actionGet().isExists(), equalTo(true));
+            assertThat(client().prepareGet("alias-a0", "1").get().isExists(), equalTo(true));
         }

         logger.info("--> indexing with id [0], and routing [1] using alias to test-b");
-        client().prepareIndex("alias-b1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("alias-b1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         logger.info("--> verifying get with no routing, should not find anything");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test-a", "1").execute().actionGet().isExists(), equalTo(false));
+            assertThat(client().prepareGet("test-a", "1").get().isExists(), equalTo(false));
         }
         logger.info("--> verifying get with routing, should find");
         for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("alias-b1", "1").execute().actionGet().isExists(), equalTo(true));
+            assertThat(client().prepareGet("alias-b1", "1").get().isExists(), equalTo(true));
         }

         logger.info("--> search with alias-a1,alias-b0, should not find");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(0L)
             );
             assertThat(
                 prepareSearch("alias-a1", "alias-b0").setSize(0)
                     .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
+                    .get()
                     .getHits()
                     .getTotalHits().value,
                 equalTo(0L)
@@ -456,17 +332,9 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception {

         logger.info("--> search with alias-ab, should find two");
         for (int i = 0; i < 5; i++) {
+            assertThat(prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L));
             assertThat(
-                prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-                equalTo(2L)
-            );
-            assertThat(
-                prepareSearch("alias-ab").setSize(0)
-                    .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias-ab").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
         }
@@ -474,18 +342,13 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception {
         logger.info("--> search with alias-a0,alias-b1 should find two");
         for (int i = 0; i < 5; i++) {
             assertThat(
-                prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
-                    .getHits()
-                    .getTotalHits().value,
+                prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value,
                 equalTo(2L)
             );
             assertThat(
                 prepareSearch("alias-a0", "alias-b1").setSize(0)
                     .setQuery(QueryBuilders.matchAllQuery())
-                    .execute()
-                    .actionGet()
+                    .get()
                     .getHits()
                     .getTotalHits().value,
                 equalTo(2L)
@@ -505,16 +368,13 @@ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue2682() thro
         assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("index").alias("index_1").routing("1")));

         logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
-        client().prepareIndex("index_1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("index_1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         logger.info("--> indexing on index_2 which is a concrete index");
-        client().prepareIndex("index_2").setId("2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("index_2").setId("2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();

         logger.info("--> search all on index_* should find two");
         for (int i = 0; i < 5; i++) {
-            assertThat(
-                prepareSearch("index_*").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value,
-                equalTo(2L)
-            );
+            assertThat(prepareSearch("index_*").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L));
         }
     }

@@ -531,21 +391,20 @@ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue3268() thro
         assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("index").alias("index_1").routing("1")));

         logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
-        client().prepareIndex("index_1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        prepareIndex("index_1").setId("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         logger.info("--> indexing on index_2 which is a concrete index");
-        client().prepareIndex("index_2").setId("2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
-
-        SearchResponse searchResponse = prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setSize(1)
-            .setQuery(QueryBuilders.matchAllQuery())
-            .execute()
-            .actionGet();
-
-        logger.info("--> search all on index_* should find two");
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L));
-        // Let's make sure that, even though 2 docs are available, only one is returned according to the size we set in the request
-        // Therefore the reduce phase has taken place, which proves that the QUERY_AND_FETCH search type wasn't erroneously forced.
-        assertThat(searchResponse.getHits().getHits().length, equalTo(1));
+        prepareIndex("index_2").setId("2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+
+        assertResponse(
+            prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setQuery(QueryBuilders.matchAllQuery()),
+            response -> {
+                logger.info("--> search all on index_* should find two");
+                assertThat(response.getHits().getTotalHits().value, equalTo(2L));
+                // Let's make sure that, even though 2 docs are available, only one is returned according to the size we set in the request
+                // Therefore the reduce phase has taken place, which proves that the QUERY_AND_FETCH search type wasn't erroneously forced.
+ assertThat(response.getHits().getHits().length, equalTo(1)); + } + ); } public void testIndexingAliasesOverTime() throws Exception { @@ -555,23 +414,15 @@ public void testIndexingAliasesOverTime() throws Exception { assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("test").alias("alias").routing("3"))); logger.info("--> indexing with id [0], and routing [3]"); - client().prepareIndex("alias").setId("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias").setId("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); logger.info("--> verifying get and search with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "0").setRouting("3").get().isExists(), equalTo(true)); + assertThat(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L)); assertThat( - prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(1L) - ); - assertThat( - prepareSearch("alias").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); } @@ -581,17 +432,9 @@ public void testIndexingAliasesOverTime() throws Exception { logger.info("--> verifying search with wrong routing should not find"); for (int i = 0; i < 5; i++) { + assertThat(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L)); assertThat( - prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(0L) - ); - assertThat( - prepareSearch("alias").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); } @@ -603,24 +446,16 @@ public void testIndexingAliasesOverTime() throws Exception { ); logger.info("--> indexing with id [1], and routing [4]"); - client().prepareIndex("alias").setId("1").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + prepareIndex("alias").setId("1").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); logger.info("--> verifying get and search with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true)); - assertThat(client().prepareGet("test", "1").setRouting("4").execute().actionGet().isExists(), equalTo(true)); - assertThat( - prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(2L) - ); + assertThat(client().prepareGet("test", "0").setRouting("3").get().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting("4").get().isExists(), equalTo(true)); + 
assertThat(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L)); assertThat( - prepareSearch("alias").setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java index 4b685ca2699be..e25da54d7b214 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.routing; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -23,6 +22,7 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.containsString; public class PartitionedRoutingIT extends ESIntegTestCase { @@ -40,8 +40,7 @@ public void testVariousPartitionSizes() throws Exception { .put("index.routing_partition_size", partitionSize) ) .setMapping("{\"_routing\":{\"required\":true}}") - .execute() - .actionGet(); + .get(); ensureGreen(); Map> routingToDocumentIds = generateRoutedDocumentIds(index); @@ -69,8 +68,7 @@ public void testShrinking() throws Exception { .put("index.routing_partition_size", partitionSize) ) .setMapping("{\"_routing\":{\"required\":true}}") - .execute() - .actionGet(); + .get(); ensureGreen(); Map> routingToDocumentIds = generateRoutedDocumentIds(index); @@ -145,35 +143,33 @@ private void verifyRoutedSearches(String index, Map> routing String routing = routingEntry.getKey(); int expectedDocuments = routingEntry.getValue().size(); - SearchResponse response = prepareSearch().setQuery(QueryBuilders.termQuery("_routing", routing)) - .setRouting(routing) - .setIndices(index) - .setSize(100) - .execute() - .actionGet(); - - logger.info( - "--> routed search on index [" - + index - + "] visited [" - + response.getTotalShards() - + "] shards for routing [" - + routing - + "] and got hits [" - + response.getHits().getTotalHits().value - + "]" + assertResponse( + prepareSearch().setQuery(QueryBuilders.termQuery("_routing", routing)).setRouting(routing).setIndices(index).setSize(100), + response -> { + logger.info( + "--> routed search on index [" + + index + + "] visited [" + + response.getTotalShards() + + "] shards for routing [" + + routing + + "] and got hits [" + + response.getHits().getTotalHits().value + + "]" + ); + + assertTrue( + response.getTotalShards() + " was not in " + expectedShards + " for " + index, + expectedShards.contains(response.getTotalShards()) + ); + assertEquals(expectedDocuments, response.getHits().getTotalHits().value); + + Set found = new HashSet<>(); + response.getHits().forEach(h -> found.add(h.getId())); + + assertEquals(routingEntry.getValue(), found); + } ); - - assertTrue( - response.getTotalShards() + " was not in " + expectedShards + " for " + index, - expectedShards.contains(response.getTotalShards()) - ); - assertEquals(expectedDocuments, 
response.getHits().getTotalHits().value); - - Set found = new HashSet<>(); - response.getHits().forEach(h -> found.add(h.getId())); - - assertEquals(routingEntry.getValue(), found); } } @@ -182,19 +178,18 @@ private void verifyBroadSearches(String index, Map> routingT String routing = routingEntry.getKey(); int expectedDocuments = routingEntry.getValue().size(); - SearchResponse response = prepareSearch().setQuery(QueryBuilders.termQuery("_routing", routing)) - .setIndices(index) - .setSize(100) - .execute() - .actionGet(); - - assertEquals(expectedShards, response.getTotalShards()); - assertEquals(expectedDocuments, response.getHits().getTotalHits().value); + assertResponse( + prepareSearch().setQuery(QueryBuilders.termQuery("_routing", routing)).setIndices(index).setSize(100), + response -> { + assertEquals(expectedShards, response.getTotalShards()); + assertEquals(expectedDocuments, response.getHits().getTotalHits().value); - Set found = new HashSet<>(); - response.getHits().forEach(h -> found.add(h.getId())); + Set found = new HashSet<>(); + response.getHits().forEach(h -> found.add(h.getId())); - assertEquals(routingEntry.getValue(), found); + assertEquals(routingEntry.getValue(), found); + } + ); } } @@ -203,7 +198,7 @@ private void verifyGets(String index, Map> routingToDocument String routing = routingEntry.getKey(); for (String id : routingEntry.getValue()) { - assertTrue(client().prepareGet(index, id).setRouting(routing).execute().actionGet().isExists()); + assertTrue(client().prepareGet(index, id).setRouting(routing).get().isExists()); } } } @@ -221,7 +216,7 @@ private Map> generateRoutedDocumentIds(String index) { String id = routingValue + "_" + String.valueOf(k); routingToDocumentIds.get(routingValue).add(id); - client().prepareIndex(index).setId(id).setRouting(routingValue).setSource("foo", "bar").get(); + prepareIndex(index).setId(id).setRouting(routingValue).setSource("foo", "bar").get(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java index 93b1ac68be6a5..772d8767b7dd0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -68,49 +68,47 @@ public void testSimpleCrudRouting() throws Exception { ensureGreen(); String routingValue = findNonMatchingRoutingValue("test", "1"); logger.info("--> indexing with id [1], and routing [{}]", routingValue); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRouting(routingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(true)); } logger.info("--> deleting with no routing, should not delete anything"); client().prepareDelete("test", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - 
assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(true)); } logger.info("--> deleting with routing, should delete"); client().prepareDelete("test", "1").setRouting(routingValue).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(false)); } logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRouting(routingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(true)); } } @@ -120,48 +118,33 @@ public void testSimpleSearchRouting() { String routingValue = findNonMatchingRoutingValue("test", "1"); logger.info("--> indexing with id [1], and routing [{}]", routingValue); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRouting(routingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet("test", "1").get().isExists(), equalTo(false)); } logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet("test", "1").setRouting(routingValue).get().isExists(), equalTo(true)); } logger.info("--> search with no routing, should fine one"); for (int i = 0; i < 5; i++) { - assertThat( - prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(1L) - ); + assertThat(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L)); } logger.info("--> search with wrong routing, should not find"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + 
prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); assertThat( - prepareSearch().setSize(0) - .setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(0L) ); } @@ -169,20 +152,14 @@ public void testSimpleSearchRouting() { logger.info("--> search with correct routing, should find"); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting(routingValue) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( prepareSearch().setSize(0) .setRouting(routingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(1L) @@ -191,8 +168,7 @@ public void testSimpleSearchRouting() { String secondRoutingValue = "1"; logger.info("--> indexing with id [{}], and routing [{}]", routingValue, secondRoutingValue); - client().prepareIndex("test") - .setId(routingValue) + prepareIndex("test").setId(routingValue) .setRouting(secondRoutingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) @@ -200,12 +176,9 @@ public void testSimpleSearchRouting() { logger.info("--> search with no routing, should fine two"); for (int i = 0; i < 5; i++) { + assertThat(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L)); assertThat( - prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, - equalTo(2L) - ); - assertThat( - prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits().value, + prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(2L) ); } @@ -213,20 +186,14 @@ public void testSimpleSearchRouting() { logger.info("--> search with {} routing, should find one", routingValue); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting(routingValue) - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( prepareSearch().setSize(0) .setRouting(routingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(1L) @@ -236,20 +203,14 @@ public void testSimpleSearchRouting() { logger.info("--> search with {} routing, should find one", secondRoutingValue); for (int i = 0; i < 5; i++) { assertThat( - prepareSearch().setRouting("1") - .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() - .getHits() - .getTotalHits().value, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).get().getHits().getTotalHits().value, equalTo(1L) ); assertThat( prepareSearch().setSize(0) .setRouting(secondRoutingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(1L) @@ -261,8 +222,7 @@ public void testSimpleSearchRouting() { assertThat( 
prepareSearch().setRouting(routingValue, secondRoutingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -271,8 +231,7 @@ public void testSimpleSearchRouting() { prepareSearch().setSize(0) .setRouting(routingValue, secondRoutingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -284,8 +243,7 @@ public void testSimpleSearchRouting() { assertThat( prepareSearch().setRouting(routingValue, secondRoutingValue, routingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -294,8 +252,7 @@ public void testSimpleSearchRouting() { prepareSearch().setSize(0) .setRouting(routingValue, secondRoutingValue, routingValue) .setQuery(QueryBuilders.matchAllQuery()) - .execute() - .actionGet() + .get() .getHits() .getTotalHits().value, equalTo(2L) @@ -316,14 +273,12 @@ public void testRequiredRoutingCrudApis() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); ensureGreen(); String routingValue = findNonMatchingRoutingValue("test", "1"); logger.info("--> indexing with id [1], and routing [{}]", routingValue); - client().prepareIndex(indexOrAlias()) - .setId("1") + prepareIndex(indexOrAlias()).setId("1") .setRouting(routingValue) .setSource("field", "value1") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) @@ -332,7 +287,7 @@ public void testRequiredRoutingCrudApis() throws Exception { logger.info("--> indexing with id [1], with no routing, should fail"); try { - client().prepareIndex(indexOrAlias()).setId("1").setSource("field", "value1").get(); + prepareIndex(indexOrAlias()).setId("1").setSource("field", "value1").get(); fail("index with missing routing when routing is required should fail"); } catch (ElasticsearchException e) { assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class)); @@ -340,7 +295,7 @@ public void testRequiredRoutingCrudApis() throws Exception { logger.info("--> verifying get with routing, should find"); for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get().isExists(), equalTo(true)); } logger.info("--> deleting with no routing, should fail"); @@ -353,34 +308,34 @@ public void testRequiredRoutingCrudApis() throws Exception { for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), "1").get().isExists(); fail("get with missing routing when routing is required should fail"); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get().isExists(), equalTo(true)); } try { - client().prepareUpdate(indexOrAlias(), "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").execute().actionGet(); + client().prepareUpdate(indexOrAlias(), "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").get(); fail("update with missing routing when routing is required should fail"); } catch (ElasticsearchException e) { 
assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class)); } client().prepareUpdate(indexOrAlias(), "1").setRouting(routingValue).setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").get(); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), "1").get().isExists(); fail(); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - GetResponse getResponse = client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet(); + GetResponse getResponse = client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getSourceAsMap().get("field"), equalTo("value2")); } @@ -389,13 +344,13 @@ public void testRequiredRoutingCrudApis() throws Exception { for (int i = 0; i < 5; i++) { try { - client().prepareGet(indexOrAlias(), "1").execute().actionGet().isExists(); + client().prepareGet(indexOrAlias(), "1").get().isExists(); fail(); } catch (RoutingMissingException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(e.getMessage(), equalTo("routing is required for [test]/[1]")); } - assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false)); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get().isExists(), equalTo(false)); } } @@ -412,15 +367,13 @@ public void testRequiredRoutingBulk() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); ensureGreen(); { String index = indexOrAlias(); BulkResponse bulkResponse = client().prepareBulk() .add(new IndexRequest(index).id("1").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(true)); @@ -437,16 +390,14 @@ public void testRequiredRoutingBulk() throws Exception { String index = indexOrAlias(); BulkResponse bulkResponse = client().prepareBulk() .add(new IndexRequest(index).id("1").routing("0").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); } { BulkResponse bulkResponse = client().prepareBulk() .add(new UpdateRequest(indexOrAlias(), "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(true)); @@ -462,14 +413,13 @@ public void testRequiredRoutingBulk() throws Exception { { BulkResponse bulkResponse = client().prepareBulk() .add(new UpdateRequest(indexOrAlias(), "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2").routing("0")) - .execute() - .actionGet(); + .get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); } { String index = indexOrAlias(); - BulkResponse bulkResponse = client().prepareBulk().add(new DeleteRequest(index).id("1")).execute().actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(new DeleteRequest(index).id("1")).get(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(true)); @@ -484,7 +434,7 @@ 
public void testRequiredRoutingBulk() throws Exception { { String index = indexOrAlias(); - BulkResponse bulkResponse = client().prepareBulk().add(new DeleteRequest(index).id("1").routing("0")).execute().actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(new DeleteRequest(index).id("1").routing("0")).get(); assertThat(bulkResponse.getItems().length, equalTo(1)); assertThat(bulkResponse.hasFailures(), equalTo(false)); } @@ -504,22 +454,20 @@ public void testRequiredRoutingMappingVariousAPIs() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); ensureGreen(); String routingValue = findNonMatchingRoutingValue("test", "1"); logger.info("--> indexing with id [1], and routing [{}]", routingValue); - client().prepareIndex(indexOrAlias()).setId("1").setRouting(routingValue).setSource("field", "value1").get(); + prepareIndex(indexOrAlias()).setId("1").setRouting(routingValue).setSource("field", "value1").get(); logger.info("--> indexing with id [2], and routing [{}]", routingValue); - client().prepareIndex(indexOrAlias()) - .setId("2") + prepareIndex(indexOrAlias()).setId("2") .setRouting(routingValue) .setSource("field", "value2") .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); logger.info("--> verifying get with id [1] with routing [0], should succeed"); - assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true)); + assertThat(client().prepareGet(indexOrAlias(), "1").setRouting(routingValue).get().isExists(), equalTo(true)); logger.info("--> verifying get with id [1], with no routing, should fail"); try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index ec01e34976058..ad610954e86b6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -8,19 +8,20 @@ package org.elasticsearch.search; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.script.Script; @@ -50,6 +51,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102257") public class SearchCancellationIT extends AbstractSearchCancellationTestCase { @Override @@ -69,7 +71,7 @@ 
public void testCancellationDuringQueryPhase() throws Exception { ).execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); logger.info("Segments {}", Strings.toString(indicesAdmin().prepareSegments("test").get())); ensureSearchWasCancelled(searchResponse); @@ -87,7 +89,7 @@ public void testCancellationDuringFetchPhase() throws Exception { ).execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); logger.info("Segments {}", Strings.toString(indicesAdmin().prepareSegments("test").get())); ensureSearchWasCancelled(searchResponse); @@ -132,7 +134,7 @@ public void testCancellationDuringAggregation() throws Exception { ) .execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); ensureSearchWasCancelled(searchResponse); } @@ -149,7 +151,7 @@ public void testCancellationOfScrollSearches() throws Exception { .execute(); awaitForBlock(plugins); - cancelSearch(SearchAction.NAME); + cancelSearch(TransportSearchAction.TYPE.name()); disableBlocks(plugins); SearchResponse response = ensureSearchWasCancelled(searchResponse); if (response != null) { @@ -189,7 +191,7 @@ public void testCancellationOfScrollSearchesOnFollowupRequests() throws Exceptio .execute(); awaitForBlock(plugins); - cancelSearch(SearchScrollAction.NAME); + cancelSearch(TransportSearchScrollAction.TYPE.name()); disableBlocks(plugins); SearchResponse response = ensureSearchWasCancelled(scrollResponse); @@ -213,7 +215,7 @@ public void testCancelMultiSearch() throws Exception { ) .execute(); awaitForBlock(plugins); - cancelSearch(MultiSearchAction.NAME); + cancelSearch(TransportMultiSearchAction.TYPE.name()); disableBlocks(plugins); for (MultiSearchResponse.Item item : msearchResponse.actionGet()) { if (item.getFailure() != null) { @@ -300,7 +302,7 @@ List getCoordinatorSearchTasks() { for (String nodeName : internalCluster().getNodeNames()) { TransportService transportService = internalCluster().getInstance(TransportService.class, nodeName); for (Task task : transportService.getTaskManager().getCancellableTasks().values()) { - if (task.getAction().equals(SearchAction.NAME)) { + if (task.getAction().equals(TransportSearchAction.TYPE.name())) { tasks.add((SearchTask) task); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java index 1bcf2d8fb327f..ecf839bff5e4c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java @@ -47,7 +47,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { private void indexDocs() { for (int i = 0; i < 32; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } refresh("test"); } @@ -90,7 +90,7 @@ public void testAggsTimeout() { } public void testPartialResultsIntolerantTimeout() throws Exception { - client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); ElasticsearchException ex = expectThrows( 
ElasticsearchException.class, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java index 3202037c8486f..51c1269b87675 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithRejectionsIT.java @@ -36,7 +36,7 @@ public void testOpenContextsAfterRejections() throws Exception { ensureGreen("test"); final int docs = scaledRandomIntBetween(20, 50); for (int i = 0; i < docs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().get(); assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/StressSearchServiceReaperIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/StressSearchServiceReaperIT.java index eec815d6957aa..c7aa4b3179288 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/StressSearchServiceReaperIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/StressSearchServiceReaperIT.java @@ -37,7 +37,7 @@ public void testStressReaper() throws ExecutionException, InterruptedException { int num = randomIntBetween(100, 150); IndexRequestBuilder[] builders = new IndexRequestBuilder[num]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test").setId("" + i).setSource("f", English.intToEnglish(i)); + builders[i] = prepareIndex("test").setId("" + i).setSource("f", English.intToEnglish(i)); } createIndex("test"); indexRandom(true, builders); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java index cc74dcc3d0d28..df8f3825a5ea6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.test.ESIntegTestCase; @@ -19,7 +18,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; @ESIntegTestCase.SuiteScopeTestCase public class AggregationsIntegrationIT extends ESIntegTestCase { @@ -32,32 +31,39 @@ public void setupSuiteScopeCluster() throws Exception { numDocs = randomIntBetween(1, 20); List docs = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { - docs.add(client().prepareIndex("index").setSource("f", Integer.toString(i / 3))); + docs.add(prepareIndex("index").setSource("f", Integer.toString(i / 3))); } 
indexRandom(true, docs); } public void testScroll() { final int size = randomIntBetween(1, 4); - SearchResponse response = prepareSearch("index").setSize(size) - .setScroll(TimeValue.timeValueMinutes(1)) - .addAggregation(terms("f").field("f")) - .get(); - assertNoFailures(response); - Aggregations aggregations = response.getAggregations(); - assertNotNull(aggregations); - Terms terms = aggregations.get("f"); - assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); - - int total = response.getHits().getHits().length; - while (response.getHits().getHits().length > 0) { - response = client().prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); - assertNoFailures(response); - assertNull(response.getAggregations()); - total += response.getHits().getHits().length; + final String[] scroll = new String[1]; + final int[] total = new int[1]; + assertNoFailuresAndResponse( + prepareSearch("index").setSize(size).setScroll(TimeValue.timeValueMinutes(1)).addAggregation(terms("f").field("f")), + response -> { + Aggregations aggregations = response.getAggregations(); + assertNotNull(aggregations); + Terms terms = aggregations.get("f"); + assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount()); + scroll[0] = response.getScrollId(); + total[0] = response.getHits().getHits().length; + } + ); + int currentTotal = 0; + while (total[0] - currentTotal > 0) { + currentTotal = total[0]; + assertNoFailuresAndResponse( + client().prepareSearchScroll(scroll[0]).setScroll(TimeValue.timeValueMinutes(1)), + scrollResponse -> { + assertNull(scrollResponse.getAggregations()); + total[0] += scrollResponse.getHits().getHits().length; + scroll[0] = scrollResponse.getScrollId(); + } + ); } - clearScroll(response.getScrollId()); - assertEquals(numDocs, total); + clearScroll(scroll[0]); + assertEquals(numDocs, total[0]); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java index a0144d30a4728..fc0a93ad3d290 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.missing.Missing; @@ -24,7 +23,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.missing; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; @@ -49,38 +48,40 @@ public void testMultipleAggsOnSameField_WithDifferentRequiredValueSourceType() t String name = "name_" + randomIntBetween(1, 10); if (rarely()) { missingValues++; - builders[i] = 
client().prepareIndex("idx").setSource(jsonBuilder().startObject().field("name", name).endObject()); + builders[i] = prepareIndex("idx").setSource(jsonBuilder().startObject().field("name", name).endObject()); } else { int value = randomIntBetween(1, 10); values.put(value, values.getOrDefault(value, 0) + 1); - builders[i] = client().prepareIndex("idx") - .setSource(jsonBuilder().startObject().field("name", name).field("value", value).endObject()); + builders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject().field("name", name).field("value", value).endObject() + ); } } indexRandom(true, builders); ensureSearchable(); + final long finalMissingValues = missingValues; SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - SearchResponse response = prepareSearch("idx").addAggregation(missing("missing_values").field("value")) - .addAggregation(terms("values").field("value").collectMode(aggCollectionMode)) - .get(); - - assertNoFailures(response); - - Aggregations aggs = response.getAggregations(); - - Missing missing = aggs.get("missing_values"); - assertNotNull(missing); - assertThat(missing.getDocCount(), equalTo(missingValues)); - - Terms terms = aggs.get("values"); - assertNotNull(terms); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(values.size())); - for (Terms.Bucket bucket : buckets) { - values.remove(((Number) bucket.getKey()).intValue()); - } - assertTrue(values.isEmpty()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(missing("missing_values").field("value")) + .addAggregation(terms("values").field("value").collectMode(aggCollectionMode)), + response -> { + Aggregations aggs = response.getAggregations(); + + Missing missing = aggs.get("missing_values"); + assertNotNull(missing); + assertThat(missing.getDocCount(), equalTo(finalMissingValues)); + + Terms terms = aggs.get("values"); + assertNotNull(terms); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(values.size())); + for (Terms.Bucket bucket : buckets) { + values.remove(((Number) bucket.getKey()).intValue()); + } + assertTrue(values.isEmpty()); + } + ); } /** @@ -108,13 +109,16 @@ public void testSubAggregationForTopAggregationOnUnmappedField() throws Exceptio ensureSearchable("idx"); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - SearchResponse searchResponse = prepareSearch("idx").addAggregation( - histogram("values").field("value1").interval(1).subAggregation(terms("names").field("name").collectMode(aggCollectionMode)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, Matchers.equalTo(0L)); - Histogram values = searchResponse.getAggregations().get("values"); - assertThat(values, notNullValue()); - assertThat(values.getBuckets().isEmpty(), is(true)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("values").field("value1").interval(1).subAggregation(terms("names").field("name").collectMode(aggCollectionMode)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, Matchers.equalTo(0L)); + Histogram values = response.getAggregations().get("values"); + assertThat(values, notNullValue()); + assertThat(values.getBuckets().isEmpty(), is(true)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java index ea896c73f8882..f2aa79d115c4c 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; @@ -57,6 +56,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; @@ -125,7 +126,7 @@ public void testRandomRanges() throws Exception { source = source.value(docs[i][j]); } source = source.endArray().endObject(); - client().prepareIndex("idx").setSource(source).get(); + prepareIndex("idx").setSource(source).get(); } assertNoFailures(indicesAdmin().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get()); @@ -164,34 +165,35 @@ public void testRandomRanges() throws Exception { reqBuilder = reqBuilder.addAggregation(filter("filter" + i, filter)); } - SearchResponse resp = reqBuilder.get(); - Range range = resp.getAggregations().get("range"); - List buckets = range.getBuckets(); + assertResponse(reqBuilder, response -> { + Range range = response.getAggregations().get("range"); + List buckets = range.getBuckets(); - Map bucketMap = Maps.newMapWithExpectedSize(buckets.size()); - for (Bucket bucket : buckets) { - bucketMap.put(bucket.getKeyAsString(), bucket); - } + Map bucketMap = Maps.newMapWithExpectedSize(buckets.size()); + for (Bucket bucket : buckets) { + bucketMap.put(bucket.getKeyAsString(), bucket); + } - for (int i = 0; i < ranges.length; ++i) { + for (int i = 0; i < ranges.length; ++i) { - long count = 0; - for (double[] values : docs) { - for (double value : values) { - if (value >= ranges[i][0] && value < ranges[i][1]) { - ++count; - break; + long count = 0; + for (double[] values : docs) { + for (double value : values) { + if (value >= ranges[i][0] && value < ranges[i][1]) { + ++count; + break; + } } } - } - final Range.Bucket bucket = bucketMap.get(Integer.toString(i)); - assertEquals(bucket.getKeyAsString(), Integer.toString(i), bucket.getKeyAsString()); - assertEquals(bucket.getKeyAsString(), count, bucket.getDocCount()); + final Range.Bucket bucket = bucketMap.get(Integer.toString(i)); + assertEquals(bucket.getKeyAsString(), Integer.toString(i), bucket.getKeyAsString()); + assertEquals(bucket.getKeyAsString(), count, bucket.getDocCount()); - final Filter filter = resp.getAggregations().get("filter" + i); - assertThat(filter.getDocCount(), equalTo(count)); - } + final Filter filter = response.getAggregations().get("filter" + i); + assertThat(filter.getDocCount(), equalTo(count)); + } + }); } // test long/double/string terms aggs with high number of buckets that require array growth @@ -248,74 +250,77 @@ public void testDuelTerms() throws Exception { source = 
source.value(Integer.toString(values[j])); } source = source.endArray().endObject(); - indexingRequests.add(client().prepareIndex("idx").setSource(source)); + indexingRequests.add(prepareIndex("idx").setSource(source)); } indexRandom(true, indexingRequests); assertNoFailures(indicesAdmin().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get()); - SearchResponse resp = prepareSearch("idx").addAggregation( - terms("long").field("long_values") - .size(maxNumTerms) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(min("min").field("num")) - ) - .addAggregation( - terms("double").field("double_values") - .size(maxNumTerms) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(max("max").field("num")) - ) - .addAggregation( - terms("string_map").field("string_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString()) - .size(maxNumTerms) - .subAggregation(stats("stats").field("num")) - ) - .addAggregation( - terms("string_global_ordinals").field("string_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) + assertResponse( + prepareSearch("idx").addAggregation( + terms("long").field("long_values") .size(maxNumTerms) - .subAggregation(extendedStats("stats").field("num")) - ) - .addAggregation( - terms("string_global_ordinals_doc_values").field("string_values.doc_values") .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) - .size(maxNumTerms) - .subAggregation(extendedStats("stats").field("num")) + .subAggregation(min("min").field("num")) ) - .get(); - assertAllSuccessful(resp); - assertEquals(numDocs, resp.getHits().getTotalHits().value); - - final Terms longTerms = resp.getAggregations().get("long"); - final Terms doubleTerms = resp.getAggregations().get("double"); - final Terms stringMapTerms = resp.getAggregations().get("string_map"); - final Terms stringGlobalOrdinalsTerms = resp.getAggregations().get("string_global_ordinals"); - final Terms stringGlobalOrdinalsDVTerms = resp.getAggregations().get("string_global_ordinals_doc_values"); - - assertEquals(valuesSet.size(), longTerms.getBuckets().size()); - assertEquals(valuesSet.size(), doubleTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringGlobalOrdinalsTerms.getBuckets().size()); - assertEquals(valuesSet.size(), stringGlobalOrdinalsDVTerms.getBuckets().size()); - for (Terms.Bucket bucket : longTerms.getBuckets()) { - final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsString()))); - final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsString()); - final Terms.Bucket stringGlobalOrdinalsBucket = stringGlobalOrdinalsTerms.getBucketByKey(bucket.getKeyAsString()); - final Terms.Bucket stringGlobalOrdinalsDVBucket = stringGlobalOrdinalsDVTerms.getBucketByKey(bucket.getKeyAsString()); - assertNotNull(doubleBucket); - assertNotNull(stringMapBucket); - assertNotNull(stringGlobalOrdinalsBucket); - assertNotNull(stringGlobalOrdinalsDVBucket); - assertEquals(bucket.getDocCount(), doubleBucket.getDocCount()); - assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount()); - assertEquals(bucket.getDocCount(), 
stringGlobalOrdinalsBucket.getDocCount());
-            assertEquals(bucket.getDocCount(), stringGlobalOrdinalsDVBucket.getDocCount());
-        }
+                .addAggregation(
+                    terms("double").field("double_values")
+                        .size(maxNumTerms)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .subAggregation(max("max").field("num"))
+                )
+                .addAggregation(
+                    terms("string_map").field("string_values")
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString())
+                        .size(maxNumTerms)
+                        .subAggregation(stats("stats").field("num"))
+                )
+                .addAggregation(
+                    terms("string_global_ordinals").field("string_values")
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString())
+                        .size(maxNumTerms)
+                        .subAggregation(extendedStats("stats").field("num"))
+                )
+                .addAggregation(
+                    terms("string_global_ordinals_doc_values").field("string_values.doc_values")
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString())
+                        .size(maxNumTerms)
+                        .subAggregation(extendedStats("stats").field("num"))
+                ),
+            response -> {
+                assertAllSuccessful(response);
+                assertEquals(numDocs, response.getHits().getTotalHits().value);
+
+                final Terms longTerms = response.getAggregations().get("long");
+                final Terms doubleTerms = response.getAggregations().get("double");
+                final Terms stringMapTerms = response.getAggregations().get("string_map");
+                final Terms stringGlobalOrdinalsTerms = response.getAggregations().get("string_global_ordinals");
+                final Terms stringGlobalOrdinalsDVTerms = response.getAggregations().get("string_global_ordinals_doc_values");
+
+                assertEquals(valuesSet.size(), longTerms.getBuckets().size());
+                assertEquals(valuesSet.size(), doubleTerms.getBuckets().size());
+                assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size());
+                assertEquals(valuesSet.size(), stringGlobalOrdinalsTerms.getBuckets().size());
+                assertEquals(valuesSet.size(), stringGlobalOrdinalsDVTerms.getBuckets().size());
+                for (Terms.Bucket bucket : longTerms.getBuckets()) {
+                    final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsString())));
+                    final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsString());
+                    final Terms.Bucket stringGlobalOrdinalsBucket = stringGlobalOrdinalsTerms.getBucketByKey(bucket.getKeyAsString());
+                    final Terms.Bucket stringGlobalOrdinalsDVBucket = stringGlobalOrdinalsDVTerms.getBucketByKey(bucket.getKeyAsString());
+                    assertNotNull(doubleBucket);
+                    assertNotNull(stringMapBucket);
+                    assertNotNull(stringGlobalOrdinalsBucket);
+                    assertNotNull(stringGlobalOrdinalsDVBucket);
+                    assertEquals(bucket.getDocCount(), doubleBucket.getDocCount());
+                    assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount());
+                    assertEquals(bucket.getDocCount(), stringGlobalOrdinalsBucket.getDocCount());
+                    assertEquals(bucket.getDocCount(), stringGlobalOrdinalsDVBucket.getDocCount());
+                }
+            }
+        );
     }

     // Duel between histograms and scripted terms
@@ -348,32 +353,33 @@ public void testDuelTermsHistogram() throws Exception {
                 source = source.value(randomFrom(values));
             }
             source = source.endArray().endObject();
-            client().prepareIndex("idx").setSource(source).get();
+            prepareIndex("idx").setSource(source).get();
         }
         assertNoFailures(indicesAdmin().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());

         Map<String, Object> params = new HashMap<>();
         params.put("interval", interval);
-        SearchResponse resp = prepareSearch("idx").addAggregation(
-            terms("terms").field("values")
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / interval)", params))
-                .size(maxNumTerms)
-        ).addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)).get();
-
-        assertNoFailures(resp);
-
-        Terms terms = resp.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        Histogram histo = resp.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size()));
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            final double key = ((Number) bucket.getKey()).doubleValue() / interval;
-            final Terms.Bucket termsBucket = terms.getBucketByKey(String.valueOf(key));
-            assertEquals(bucket.getDocCount(), termsBucket.getDocCount());
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                terms("terms").field("values")
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / interval)", params))
+                    .size(maxNumTerms)
+            ).addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size()));
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    final double key = ((Number) bucket.getKey()).doubleValue() / interval;
+                    final Terms.Bucket termsBucket = terms.getBucketByKey(String.valueOf(key));
+                    assertEquals(bucket.getDocCount(), termsBucket.getDocCount());
+                }
+            }
+        );
     }

     public void testLargeNumbersOfPercentileBuckets() throws Exception {
@@ -394,59 +400,64 @@ public void testLargeNumbersOfPercentileBuckets() throws Exception {
         logger.info("Indexing [{}] docs", numDocs);
         List<IndexRequestBuilder> indexingRequests = new ArrayList<>();
         for (int i = 0; i < numDocs; ++i) {
-            indexingRequests.add(client().prepareIndex("idx").setId(Integer.toString(i)).setSource("double_value", randomDouble()));
+            indexingRequests.add(prepareIndex("idx").setId(Integer.toString(i)).setSource("double_value", randomDouble()));
         }
         indexRandom(true, indexingRequests);

-        SearchResponse response = prepareSearch("idx").addAggregation(
-            terms("terms").field("double_value")
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .subAggregation(percentiles("pcts").field("double_value"))
-        ).get();
-        assertAllSuccessful(response);
-        assertEquals(numDocs, response.getHits().getTotalHits().value);
+        assertResponse(
+            prepareSearch("idx").addAggregation(
+                terms("terms").field("double_value")
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .subAggregation(percentiles("pcts").field("double_value"))
+            ),
+            response -> {
+                assertAllSuccessful(response);
+                assertEquals(numDocs, response.getHits().getTotalHits().value);
+            }
+        );
     }

     // https://github.com/elastic/elasticsearch/issues/6435
     public void testReduce() throws Exception {
         createIndex("idx");
         final int value = randomIntBetween(0, 10);
-        indexRandom(true, client().prepareIndex("idx").setSource("f", value));
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            filter("filter", QueryBuilders.matchAllQuery()).subAggregation(
-                range("range").field("f").addUnboundedTo(6).addUnboundedFrom(6).subAggregation(sum("sum").field("f"))
-            )
-        ).get();
-
-        assertNoFailures(response);
-
-        Filter filter = response.getAggregations().get("filter");
-        assertNotNull(filter);
-        assertEquals(1, filter.getDocCount());
-
-        Range range = filter.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(2));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("*-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getDocCount(), equalTo(value < 6 ? 1L : 0L));
-        Sum sum = bucket.getAggregations().get("sum");
-        assertEquals(value < 6 ? value : 0, sum.value(), 0d);
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getDocCount(), equalTo(value >= 6 ? 1L : 0L));
-        sum = bucket.getAggregations().get("sum");
-        assertEquals(value >= 6 ? value : 0, sum.value(), 0d);
+        indexRandom(true, prepareIndex("idx").setSource("f", value));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                filter("filter", QueryBuilders.matchAllQuery()).subAggregation(
+                    range("range").field("f").addUnboundedTo(6).addUnboundedFrom(6).subAggregation(sum("sum").field("f"))
+                )
+            ),
+            response -> {
+                Filter filter = response.getAggregations().get("filter");
+                assertNotNull(filter);
+                assertEquals(1, filter.getDocCount());
+
+                Range range = filter.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(2));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("*-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getDocCount(), equalTo(value < 6 ? 1L : 0L));
+                Sum sum = bucket.getAggregations().get("sum");
+                assertEquals(value < 6 ? value : 0, sum.value(), 0d);
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getDocCount(), equalTo(value >= 6 ? 1L : 0L));
+                sum = bucket.getAggregations().get("sum");
+                assertEquals(value >= 6 ? value : 0, sum.value(), 0d);
+            }
+        );
     }

     private void assertEquals(Terms t1, Terms t2) {
@@ -469,46 +480,49 @@ public void testDuelDepthBreadthFirst() throws Exception {
             final int v1 = randomInt(1 << randomInt(7));
             final int v2 = randomInt(1 << randomInt(7));
             final int v3 = randomInt(1 << randomInt(7));
-            reqs.add(client().prepareIndex("idx").setSource("f1", v1, "f2", v2, "f3", v3));
+            reqs.add(prepareIndex("idx").setSource("f1", v1, "f2", v2, "f3", v3));
         }
         indexRandom(true, reqs);

-        final SearchResponse r1 = prepareSearch("idx").addAggregation(
-            terms("f1").field("f1")
-                .collectMode(SubAggCollectionMode.DEPTH_FIRST)
-                .subAggregation(
-                    terms("f2").field("f2")
-                        .collectMode(SubAggCollectionMode.DEPTH_FIRST)
-                        .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST))
-                )
-        ).get();
-        assertNoFailures(r1);
-        final SearchResponse r2 = prepareSearch("idx").addAggregation(
-            terms("f1").field("f1")
-                .collectMode(SubAggCollectionMode.BREADTH_FIRST)
-                .subAggregation(
-                    terms("f2").field("f2")
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                terms("f1").field("f1")
+                    .collectMode(SubAggCollectionMode.DEPTH_FIRST)
+                    .subAggregation(
+                        terms("f2").field("f2")
+                            .collectMode(SubAggCollectionMode.DEPTH_FIRST)
+                            .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST))
+                    )
+            ),
+            response1 -> assertNoFailuresAndResponse(
+                prepareSearch("idx").addAggregation(
+                    terms("f1").field("f1")
                         .collectMode(SubAggCollectionMode.BREADTH_FIRST)
-                        .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST))
-                )
-        ).get();
-        assertNoFailures(r2);
-
-        final Terms t1 = r1.getAggregations().get("f1");
-        final Terms t2 = r2.getAggregations().get("f1");
-        assertEquals(t1, t2);
-        for (Terms.Bucket b1 : t1.getBuckets()) {
-            final Terms.Bucket b2 = t2.getBucketByKey(b1.getKeyAsString());
-            final Terms sub1 = b1.getAggregations().get("f2");
-            final Terms sub2 = b2.getAggregations().get("f2");
-            assertEquals(sub1, sub2);
-            for (Terms.Bucket subB1 : sub1.getBuckets()) {
-                final Terms.Bucket subB2 = sub2.getBucketByKey(subB1.getKeyAsString());
-                final Terms subSub1 = subB1.getAggregations().get("f3");
-                final Terms subSub2 = subB2.getAggregations().get("f3");
-                assertEquals(subSub1, subSub2);
-            }
-        }
+                        .subAggregation(
+                            terms("f2").field("f2")
+                                .collectMode(SubAggCollectionMode.BREADTH_FIRST)
+                                .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST))
+                        )
+                ),
+                response2 -> {
+                    final Terms t1 = response1.getAggregations().get("f1");
+                    final Terms t2 = response2.getAggregations().get("f1");
+                    assertEquals(t1, t2);
+                    for (Terms.Bucket b1 : t1.getBuckets()) {
+                        final Terms.Bucket b2 = t2.getBucketByKey(b1.getKeyAsString());
+                        final Terms sub1 = b1.getAggregations().get("f2");
+                        final Terms sub2 = b2.getAggregations().get("f2");
+                        assertEquals(sub1, sub2);
+                        for (Terms.Bucket subB1 : sub1.getBuckets()) {
+                            final Terms.Bucket subB2 = sub2.getBucketByKey(subB1.getKeyAsString());
+                            final Terms subSub1 = subB1.getAggregations().get("f3");
+                            final Terms subSub2 = subB2.getAggregations().get("f3");
+                            assertEquals(subSub1, subSub2);
+                        }
+                    }
+                }
+            )
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java
index fe51f4a1e2fb4..3568391279a7a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.aggregations;

-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.query.WrapperQueryBuilder;
@@ -24,13 +23,15 @@
 import java.util.HashMap;
 import java.util.Map;

+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
+
 public class FiltersAggsRewriteIT extends ESSingleNodeTestCase {

     public void testWrapperQueryIsRewritten() throws IOException {
         createIndex("test", Settings.EMPTY, "test", "title", "type=text");
-        client().prepareIndex("test").setId("1").setSource("title", "foo bar baz").get();
-        client().prepareIndex("test").setId("2").setSource("title", "foo foo foo").get();
-        client().prepareIndex("test").setId("3").setSource("title", "bar baz bax").get();
+        prepareIndex("test").setId("1").setSource("title", "foo bar baz").get();
+        prepareIndex("test").setId("2").setSource("title", "foo foo foo").get();
+        prepareIndex("test").setId("3").setSource("title", "bar baz bax").get();
         client().admin().indices().prepareRefresh("test").get();
         XContentType xContentType = randomFrom(XContentType.values());
@@ -54,11 +55,12 @@ public void testWrapperQueryIsRewritten() throws IOException {
         Map<String, Object> metadata = new HashMap<>();
         metadata.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
         builder.setMetadata(metadata);
-        SearchResponse searchResponse = client().prepareSearch("test").setSize(0).addAggregation(builder).get();
-        assertEquals(3, searchResponse.getHits().getTotalHits().value);
-        InternalFilters filters = searchResponse.getAggregations().get("titles");
-        assertEquals(1, filters.getBuckets().size());
-        assertEquals(2, filters.getBuckets().get(0).getDocCount());
-        assertEquals(metadata, filters.getMetadata());
+        assertResponse(client().prepareSearch("test").setSize(0).addAggregation(builder), response -> {
+            assertEquals(3, response.getHits().getTotalHits().value);
+            InternalFilters filters = response.getAggregations().get("titles");
+            assertEquals(1, filters.getBuckets().size());
+            assertEquals(2, filters.getBuckets().get(0).getDocCount());
+            assertEquals(metadata, filters.getMetadata());
+        });
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java
index b255a7b5f9bb6..f22e0a2931634 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.search.aggregations;

 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.metrics.Sum;
 import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue;
@@ -22,7 +21,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.maxBucket;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;

 public class MetadataIT extends ESIntegTestCase {

@@ -31,7 +30,7 @@ public void testMetadataSetOnAggregationResult() throws Exception {
         IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];
         for (int i = 0; i < builders.length; i++) {
             String name = "name_" + randomIntBetween(1, 10);
-            builders[i] = client().prepareIndex("idx").setSource("name", name, "value", randomInt());
+            builders[i] = prepareIndex("idx").setSource("name", name, "value", randomInt());
         }
         indexRandom(true, builders);
         ensureSearchable();
@@ -39,32 +38,33 @@ public void testMetadataSetOnAggregationResult() throws Exception {
         final var nestedMetadata = Map.of("nested", "value");
         var metadata = Map.of("key", "value", "numeric", 1.2, "bool", true, "complex", nestedMetadata);
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            terms("the_terms").setMetadata(metadata).field("name").subAggregation(sum("the_sum").setMetadata(metadata).field("value"))
-        ).addAggregation(maxBucket("the_max_bucket", "the_terms>the_sum").setMetadata(metadata)).get();
-
-        assertNoFailures(response);
-
-        Aggregations aggs = response.getAggregations();
-        assertNotNull(aggs);
-
-        Terms terms = aggs.get("the_terms");
-        assertNotNull(terms);
-        assertMetadata(terms.getMetadata());
-
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        for (Terms.Bucket bucket : buckets) {
-            Aggregations subAggs = bucket.getAggregations();
-            assertNotNull(subAggs);
-
-            Sum sum = subAggs.get("the_sum");
-            assertNotNull(sum);
-            assertMetadata(sum.getMetadata());
-        }
-
-        InternalBucketMetricValue maxBucket = aggs.get("the_max_bucket");
-        assertNotNull(maxBucket);
-        assertMetadata(maxBucket.getMetadata());
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                terms("the_terms").setMetadata(metadata).field("name").subAggregation(sum("the_sum").setMetadata(metadata).field("value"))
+            ).addAggregation(maxBucket("the_max_bucket", "the_terms>the_sum").setMetadata(metadata)),
+            response -> {
+                Aggregations aggs = response.getAggregations();
+                assertNotNull(aggs);
+
+                Terms terms = aggs.get("the_terms");
+                assertNotNull(terms);
+                assertMetadata(terms.getMetadata());
+
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                for (Terms.Bucket bucket : buckets) {
+                    Aggregations subAggs = bucket.getAggregations();
+                    assertNotNull(subAggs);
+
+                    Sum sum = subAggs.get("the_sum");
+                    assertNotNull(sum);
+                    assertMetadata(sum.getMetadata());
+                }
+
+                InternalBucketMetricValue maxBucket = aggs.get("the_max_bucket");
+                assertNotNull(maxBucket);
+                assertMetadata(maxBucket.getMetadata());
+            }
+        );
     }

     private void assertMetadata(Map<String, Object> returnedMetadata) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java
index ba20e86237530..8110bc124132a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java
@@ -46,10 +46,8 @@ protected void setupSuiteScopeCluster() throws Exception {
         assertAcked(prepareCreate("idx").setMapping("date", "type=date", "location", "type=geo_point", "str", "type=keyword").get());
         indexRandom(
             true,
-            client().prepareIndex("idx").setId("1").setSource(),
-            client().prepareIndex("idx")
-                .setId("2")
-                .setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2")
+            prepareIndex("idx").setId("1").setSource(),
+            prepareIndex("idx").setId("2").setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2")
         );
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
index 0af496d83f9db..4a6859620563c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
@@ -8,14 +8,13 @@
 package org.elasticsearch.search.aggregations.bucket;

 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.terms.UnmappedTerms;
 import org.elasticsearch.test.ESIntegTestCase;

-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
@@ -61,93 +60,95 @@ public void setupSuiteScopeCluster() throws Exception {
                 }
                 default -> throw new AssertionError();
             }
-            builders[i] = client().prepareIndex("idx")
-                .setSource(
-                    jsonBuilder().startObject()
-                        .field(SINGLE_VALUED_FIELD_NAME, singleValue)
-                        .array(MULTI_VALUED_FIELD_NAME, multiValue)
-                        .endObject()
-                );
+            builders[i] = prepareIndex("idx").setSource(
+                jsonBuilder().startObject()
+                    .field(SINGLE_VALUED_FIELD_NAME, singleValue)
+                    .array(MULTI_VALUED_FIELD_NAME, multiValue)
+                    .endObject()
+            );
         }
         indexRandom(true, builders);
     }

     public void testSingleValueField() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(response);
-
-        LongTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        final int bucketCount = numSingleFalses > 0 && numSingleTrues > 0 ? 2 : numSingleFalses + numSingleTrues > 0 ? 1 : 0;
-        assertThat(terms.getBuckets().size(), equalTo(bucketCount));
-
-        LongTerms.Bucket bucket = terms.getBucketByKey("false");
-        if (numSingleFalses == 0) {
-            assertNull(bucket);
-        } else {
-            assertNotNull(bucket);
-            assertEquals(numSingleFalses, bucket.getDocCount());
-            assertEquals("false", bucket.getKeyAsString());
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                LongTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                final int bucketCount = numSingleFalses > 0 && numSingleTrues > 0 ? 2 : numSingleFalses + numSingleTrues > 0 ? 1 : 0;
+                assertThat(terms.getBuckets().size(), equalTo(bucketCount));
+
+                LongTerms.Bucket bucket = terms.getBucketByKey("false");
+                if (numSingleFalses == 0) {
+                    assertNull(bucket);
+                } else {
+                    assertNotNull(bucket);
+                    assertEquals(numSingleFalses, bucket.getDocCount());
+                    assertEquals("false", bucket.getKeyAsString());
+                }

-        bucket = terms.getBucketByKey("true");
-        if (numSingleTrues == 0) {
-            assertNull(bucket);
-        } else {
-            assertNotNull(bucket);
-            assertEquals(numSingleTrues, bucket.getDocCount());
-            assertEquals("true", bucket.getKeyAsString());
-        }
+                bucket = terms.getBucketByKey("true");
+                if (numSingleTrues == 0) {
+                    assertNull(bucket);
+                } else {
+                    assertNotNull(bucket);
+                    assertEquals(numSingleTrues, bucket.getDocCount());
+                    assertEquals("true", bucket.getKeyAsString());
+                }
+            }
+        );
     }

     public void testMultiValueField() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(response);
-
-        LongTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        final int bucketCount = numMultiFalses > 0 && numMultiTrues > 0 ? 2 : numMultiFalses + numMultiTrues > 0 ? 1 : 0;
-        assertThat(terms.getBuckets(), hasSize(bucketCount));
-
-        LongTerms.Bucket bucket = terms.getBucketByKey("false");
-        if (numMultiFalses == 0) {
-            assertNull(bucket);
-        } else {
-            assertNotNull(bucket);
-            assertEquals(numMultiFalses, bucket.getDocCount());
-            assertEquals("false", bucket.getKeyAsString());
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                LongTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                final int bucketCount = numMultiFalses > 0 && numMultiTrues > 0 ? 2 : numMultiFalses + numMultiTrues > 0 ? 1 : 0;
+                assertThat(terms.getBuckets(), hasSize(bucketCount));
+
+                LongTerms.Bucket bucket = terms.getBucketByKey("false");
+                if (numMultiFalses == 0) {
+                    assertNull(bucket);
+                } else {
+                    assertNotNull(bucket);
+                    assertEquals(numMultiFalses, bucket.getDocCount());
+                    assertEquals("false", bucket.getKeyAsString());
+                }

-        bucket = terms.getBucketByKey("true");
-        if (numMultiTrues == 0) {
-            assertNull(bucket);
-        } else {
-            assertNotNull(bucket);
-            assertEquals(numMultiTrues, bucket.getDocCount());
-            assertEquals("true", bucket.getKeyAsString());
-        }
+                bucket = terms.getBucketByKey("true");
+                if (numMultiTrues == 0) {
+                    assertNull(bucket);
+                } else {
+                    assertNotNull(bucket);
+                    assertEquals(numMultiTrues, bucket.getDocCount());
+                    assertEquals("true", bucket.getKeyAsString());
+                }
+            }
+        );
     }

     public void testUnmapped() throws Exception {
-        SearchResponse response = prepareSearch("idx_unmapped").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .size(between(1, 5))
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(response);
-
-        UnmappedTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(0));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .size(between(1, 5))
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                UnmappedTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(0));
+            }
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
index 920fd79401cc6..a9ff9f15a7e92 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
@@ -10,7 +10,6 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.time.DateFormatter;
@@ -61,6 +60,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
@@ -85,32 +85,30 @@ private static String format(ZonedDateTime date, String pattern) {
     }

     private IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception {
-        return client().prepareIndex(idx)
-            .setSource(
-                jsonBuilder().startObject()
-                    .timeField("date", date)
-                    .field("value", value)
-                    .startArray("dates")
-                    .timeValue(date)
-                    .timeValue(date.plusMonths(1).plusDays(1))
-                    .endArray()
-                    .endObject()
-            );
+        return prepareIndex(idx).setSource(
+            jsonBuilder().startObject()
+                .timeField("date", date)
+                .field("value", value)
+                .startArray("dates")
+                .timeValue(date)
+                .timeValue(date.plusMonths(1).plusDays(1))
+                .endArray()
+                .endObject()
+        );
     }

     private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
-        return client().prepareIndex("idx")
-            .setSource(
-                jsonBuilder().startObject()
-                    .field("value", value)
-                    .field("constant", 1)
-                    .timeField("date", date(month, day))
-                    .startArray("dates")
-                    .timeValue(date(month, day))
-                    .timeValue(date(month + 1, day + 1))
-                    .endArray()
-                    .endObject()
-            );
+        return prepareIndex("idx").setSource(
+            jsonBuilder().startObject()
+                .field("value", value)
+                .field("constant", 1)
+                .timeField("date", date(month, day))
+                .startArray("dates")
+                .timeValue(date(month, day))
+                .timeValue(date(month + 1, day + 1))
+                .endArray()
+                .endObject()
+        );
     }

     @Override
@@ -121,9 +119,7 @@ public void setupSuiteScopeCluster() throws Exception {
         List<IndexRequestBuilder> builders = new ArrayList<>();
         for (int i = 0; i < 2; i++) {
             builders.add(
-                client().prepareIndex("empty_bucket_idx")
-                    .setId("" + i)
-                    .setSource(jsonBuilder().startObject().field("value", i * 2).endObject())
+                prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject())
             );
         }
@@ -164,45 +160,55 @@ private void getMultiSortDocs(List<IndexRequestBuilder> builders) throws IOExcep
         assertAcked(indicesAdmin().prepareCreate("sort_idx").setMapping("date", "type=date").get());
         for (int i = 1; i <= 3; i++) {
             builders.add(
-                client().prepareIndex("sort_idx")
-                    .setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).field("l", 1).field("d", i).endObject())
+                prepareIndex("sort_idx").setSource(
+                    jsonBuilder().startObject().timeField("date", date(1, 1)).field("l", 1).field("d", i).endObject()
+                )
             );
             builders.add(
-                client().prepareIndex("sort_idx")
-                    .setSource(jsonBuilder().startObject().timeField("date", date(1, 2)).field("l", 2).field("d", i).endObject())
+                prepareIndex("sort_idx").setSource(
+                    jsonBuilder().startObject().timeField("date", date(1, 2)).field("l", 2).field("d", i).endObject()
+                )
             );
         }
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 3)).field("l", 3).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().timeField("date", date(1, 3)).field("l", 3).field("d", 1).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().timeField("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject()
+            )
        );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 4)).field("l", 3).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().timeField("date", date(1, 4)).field("l", 3).field("d", 1).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().timeField("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 5)).field("l", 5).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().timeField("date", date(1, 5)).field("l", 5).field("d", 1).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().timeField("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 6)).field("l", 5).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().timeField("date", date(1, 6)).field("l", 5).field("d", 1).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 7)).field("l", 5).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().timeField("date", date(1, 7)).field("l", 5).field("d", 1).endObject()
+            )
         );
     }
@@ -225,94 +231,100 @@ private static String getBucketKeyAsString(ZonedDateTime key, ZoneId tz) {
     }

     public void testSingleValuedField() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)
-        ).get();
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                Histogram.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));

-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        Histogram.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(3L));
+                key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+            }
+        );
     }

     public void testSingleValuedFieldWithTimeZone() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1).timeZone(ZoneId.of("+01:00"))
-        ).execute().actionGet();
-        ZoneId tz = ZoneId.of("+01:00");
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(6));
-
-        ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC);
-        Histogram.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        key = ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        key = ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        key = ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(3);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        key = ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(4);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        key = ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(5);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date")
+                    .calendarInterval(DateHistogramInterval.DAY)
+                    .minDocCount(1)
+                    .timeZone(ZoneId.of("+01:00"))
+            ),
+            response -> {
+                ZoneId tz = ZoneId.of("+01:00");
+
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(6));
+
+                ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC);
+                Histogram.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+
+                key = ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+
+                key = ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+
+                key = ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(3);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+
+                key = ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(4);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+
+                key = ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(5);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+            }
+        );
     }

     public void testSingleValued_timeZone_epoch() throws Exception {
@@ -322,257 +334,268 @@ public void testSingleValued_timeZone_epoch() throws Exception {
             format = format + "||date_optional_time";
         }
         ZoneId tz = ZoneId.of("+01:00");
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1).timeZone(tz).format(format)
-        ).get();
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(6));
-
-        List<ZonedDateTime> expectedKeys = new ArrayList<>();
-        expectedKeys.add(ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC));
-        expectedKeys.add(ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC));
-        expectedKeys.add(ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC));
-        expectedKeys.add(ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC));
-        expectedKeys.add(ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC));
-        expectedKeys.add(ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC));
-
-        Iterator<ZonedDateTime> keyIterator = expectedKeys.iterator();
-        for (Histogram.Bucket bucket : buckets) {
-            assertThat(bucket, notNullValue());
-            ZonedDateTime expectedKey = keyIterator.next();
-            String bucketKey = bucket.getKeyAsString();
-            String expectedBucketName = Long.toString(expectedKey.toInstant().toEpochMilli() / millisDivider);
-            assertThat(bucketKey, equalTo(expectedBucketName));
-            assertThat(((ZonedDateTime) bucket.getKey()), equalTo(expectedKey));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1).timeZone(tz).format(format)
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(6));
+
+                List<ZonedDateTime> expectedKeys = new ArrayList<>();
+                expectedKeys.add(ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC));
+                expectedKeys.add(ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC));
+                expectedKeys.add(ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC));
+                expectedKeys.add(ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC));
+                expectedKeys.add(ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC));
+                expectedKeys.add(ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC));
+
+                Iterator<ZonedDateTime> keyIterator = expectedKeys.iterator();
+                for (Histogram.Bucket bucket : buckets) {
+                    assertThat(bucket, notNullValue());
+                    ZonedDateTime expectedKey = keyIterator.next();
+                    String bucketKey = bucket.getKeyAsString();
+                    String expectedBucketName = Long.toString(expectedKey.toInstant().toEpochMilli() / millisDivider);
+                    assertThat(bucketKey, equalTo(expectedBucketName));
+                    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(expectedKey));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }

     public void testSingleValuedFieldOrderedByKeyAsc() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(true))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        int i = 0;
-        for (Histogram.Bucket bucket : buckets) {
-            assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
-            i++;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(true))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                int i = 0;
+                for (Histogram.Bucket bucket : buckets) {
+                    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
+                    i++;
+                }
+            }
+        );
     }

     public void testSingleValuedFieldOrderedByKeyDesc() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(false))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        assertThat(histo.getBuckets().size(), equalTo(3));
-
-        int i = 2;
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
-            i--;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(false))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                assertThat(histo.getBuckets().size(), equalTo(3));
+
+                int i = 2;
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
+                    i--;
+                }
+            }
+        );
     }

     public void testSingleValuedFieldOrderedByCountAsc() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(true))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        assertThat(histo.getBuckets().size(), equalTo(3));
-
-        int i = 0;
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
-            i++;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(true))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                assertThat(histo.getBuckets().size(), equalTo(3));
+
+                int i = 0;
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
+                    i++;
+                }
+            }
+        );
    }

     public void testSingleValuedFieldOrderedByCountDesc() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        assertThat(histo.getBuckets().size(), equalTo(3));
-
-        int i = 2;
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
-            i--;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                assertThat(histo.getBuckets().size(), equalTo(3));
+
+                int i = 2;
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
+                    i--;
+                }
+            }
+        );
     }

     public void testSingleValuedFieldWithSubAggregation() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).subAggregation(sum("sum").field("value"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(3));
-        Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key");
-        Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count");
-        Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value");
-
-        ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        Histogram.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-        Sum sum = bucket.getAggregations().get("sum");
-        assertThat(sum, notNullValue());
-        assertThat(sum.value(), equalTo(1.0));
-        assertThat((ZonedDateTime) propertiesKeys[0], equalTo(key));
-        assertThat((long) propertiesDocCounts[0], equalTo(1L));
-        assertThat((double) propertiesCounts[0], equalTo(1.0));
-
-        key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-        sum = bucket.getAggregations().get("sum");
-        assertThat(sum, notNullValue());
-        assertThat(sum.value(), equalTo(5.0));
-        assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key));
-        assertThat((long) propertiesDocCounts[1], equalTo(2L));
-        assertThat((double) propertiesCounts[1], equalTo(5.0));
-
-        key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(3L));
-        sum = bucket.getAggregations().get("sum");
-        assertThat(sum, notNullValue());
-        assertThat(sum.value(), equalTo(15.0));
-        assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key));
-        assertThat((long) propertiesDocCounts[2], equalTo(3L));
-        assertThat((double) propertiesCounts[2], equalTo(15.0));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).subAggregation(sum("sum").field("value"))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(3));
+                Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key");
+                Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count");
+                Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value");
+
+                ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                Histogram.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+                Sum sum = bucket.getAggregations().get("sum");
+                assertThat(sum, notNullValue());
+                assertThat(sum.value(), equalTo(1.0));
+                assertThat((ZonedDateTime) propertiesKeys[0], equalTo(key));
+                assertThat((long) propertiesDocCounts[0], equalTo(1L));
+                assertThat((double) propertiesCounts[0], equalTo(1.0));
+
+                key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+                sum = bucket.getAggregations().get("sum");
+                assertThat(sum, notNullValue());
+                assertThat(sum.value(), equalTo(5.0));
+                assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key));
+                assertThat((long) propertiesDocCounts[1], equalTo(2L));
+                assertThat((double) propertiesCounts[1], equalTo(5.0));
+
+                key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+                sum = bucket.getAggregations().get("sum");
+                assertThat(sum, notNullValue());
+                assertThat(sum.value(), equalTo(15.0));
+                assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key));
+                assertThat((long) propertiesDocCounts[2], equalTo(3L));
+                assertThat((double) propertiesCounts[2], equalTo(15.0));
+            }
+        );
     }

     public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date")
-                .calendarInterval(DateHistogramInterval.MONTH)
-                .order(BucketOrder.aggregation("sum", true))
-                .subAggregation(max("sum").field("value"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        assertThat(histo.getBuckets().size(), equalTo(3));
-
-        int i = 0;
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
-            i++;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date")
+                    .calendarInterval(DateHistogramInterval.MONTH)
+                    .order(BucketOrder.aggregation("sum", true))
+                    .subAggregation(max("sum").field("value"))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                assertThat(histo.getBuckets().size(), equalTo(3));
+
+                int i = 0;
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
+                    i++;
+                }
+            }
+        );
     }

     public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date")
-                .calendarInterval(DateHistogramInterval.MONTH)
-                .order(BucketOrder.aggregation("sum", false))
-                .subAggregation(max("sum").field("value"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        assertThat(histo.getBuckets().size(), equalTo(3));
-
-        int i = 2;
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
-            i--;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date")
+                    .calendarInterval(DateHistogramInterval.MONTH)
+                    .order(BucketOrder.aggregation("sum", false))
+                    .subAggregation(max("sum").field("value"))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                assertThat(histo.getBuckets().size(), equalTo(3));
+
+                int i = 2;
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
+                    i--;
+                }
+            }
+        );
     }

     public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date")
-                .calendarInterval(DateHistogramInterval.MONTH)
-                .order(BucketOrder.aggregation("stats", "sum", false))
-                .subAggregation(stats("stats").field("value"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        assertThat(histo.getBuckets().size(), equalTo(3));
-
-        int i = 2;
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
-            i--;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date")
+                    .calendarInterval(DateHistogramInterval.MONTH)
+                    .order(BucketOrder.aggregation("stats", "sum", false))
+                    .subAggregation(stats("stats").field("value"))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                assertThat(histo.getBuckets().size(), equalTo(3));
+
+                int i = 2;
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)));
+                    i--;
+                }
+            }
+        );
     }

     public void testSingleValuedFieldOrderedByTieBreaker() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date")
-                .calendarInterval(DateHistogramInterval.MONTH)
-                .order(BucketOrder.aggregation("max_constant", randomBoolean()))
-                .subAggregation(max("max_constant").field("constant"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        assertThat(histo.getBuckets().size(), equalTo(3));
-
-        int i = 1;
-        for (Histogram.Bucket bucket : histo.getBuckets()) {
-            assertThat(bucket.getKey(), equalTo(date(i, 1)));
-            i++;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date")
+                    .calendarInterval(DateHistogramInterval.MONTH)
+                    .order(BucketOrder.aggregation("max_constant", randomBoolean()))
+                    .subAggregation(max("max_constant").field("constant"))
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                assertThat(histo.getBuckets().size(), equalTo(3));
+
+                int i = 1;
+                for (Histogram.Bucket bucket : histo.getBuckets()) {
+                    assertThat(bucket.getKey(), equalTo(date(i, 1)));
+                    i++;
+                }
+            }
+        );
     }

     public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception {
@@ -607,41 +630,42 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception {
     public void testSingleValuedFieldWithValueScript() throws Exception {
         Map<String, Object> params = new HashMap<>();
         params.put("fieldname", "date");
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date")
-                .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params))
-                .calendarInterval(DateHistogramInterval.MONTH)
-        ).get();
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date")
+                    .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params))
+                    .calendarInterval(DateHistogramInterval.MONTH)
+            ),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                Histogram.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));

-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        Histogram.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(3L));
+                key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+            }
+        );
     }

     /*
@@ -654,80 +678,80 @@ public void testSingleValuedFieldWithValueScript() throws Exception {
     */

     public void testMultiValuedField() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH)
-        ).get();
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH)),
+            response -> {
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, notNullValue());
+                assertThat(histo.getName(), equalTo("histo"));
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(4));
+
+                ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                Histogram.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(1L));

-        assertNoFailures(response);
-
-        Histogram histo = response.getAggregations().get("histo");
-        assertThat(histo, notNullValue());
-        assertThat(histo.getName(), equalTo("histo"));
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(4));
-
-        ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        Histogram.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(3L));
-
-        key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(5L));
-
-        key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC);
-        bucket = buckets.get(3);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
-        assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
-        assertThat(bucket.getDocCount(), equalTo(3L));
+                key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+                assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+
+                key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+                bucket =
buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5L)); + + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testMultiValuedFieldOrderedByCountDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(4)); - - List buckets = new ArrayList<>(histo.getBuckets()); - - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo(date(3, 1))); - assertThat(bucket.getDocCount(), equalTo(5L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo(date(2, 1))); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo(date(4, 1))); - assertThat(bucket.getDocCount(), equalTo(3L)); - - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo(date(1, 1))); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(4)); + + List buckets = new ArrayList<>(histo.getBuckets()); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(3, 1))); + assertThat(bucket.getDocCount(), equalTo(5L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(2, 1))); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(4, 1))); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(1, 1))); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } /** @@ -743,47 +767,48 @@ public void testMultiValuedFieldOrderedByCountDesc() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "dates"); - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("dates") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( 
+ dateHistogram("histo").field("dates") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + + ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - - ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); - - key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(5L)); - - key = ZonedDateTime.of(2012, 5, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5L)); + + key = ZonedDateTime.of(2012, 5, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } /** @@ -797,84 +822,86 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testScriptSingleValue() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); - 
SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testScriptMultiValued() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "dates"); - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) + 
.calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(5L)); - - key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5L)); + + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } /* @@ -887,78 +914,81 @@ public void testScriptMultiValued() throws Exception { */ public void testUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) - ).get(); - - assertNoFailures(response); - - Histogram histo = 
response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(0)); + } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) - ).get(); + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse 
searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(dateHistogram("date_histo").field("value").fixedInterval(DateHistogramInterval.HOUR)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Histogram.Bucket bucket = buckets.get(1); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("1.0")); - - Histogram dateHisto = bucket.getAggregations().get("date_histo"); - assertThat(dateHisto, Matchers.notNullValue()); - assertThat(dateHisto.getName(), equalTo("date_histo")); - assertThat(dateHisto.getBuckets().isEmpty(), is(true)); - + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(dateHistogram("date_histo").field("value").fixedInterval(DateHistogramInterval.HOUR)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Histogram.Bucket bucket = buckets.get(1); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("1.0")); + + Histogram dateHisto = bucket.getAggregations().get("date_histo"); + assertThat(dateHisto, Matchers.notNullValue()); + assertThat(dateHisto.getName(), equalTo("date_histo")); + assertThat(dateHisto.getBuckets().isEmpty(), is(true)); + } + ); } public void testSingleValueWithTimeZone() throws Exception { @@ -966,37 +996,37 @@ public void testSingleValueWithTimeZone() throws Exception { IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; ZonedDateTime date = date("2014-03-11T00:00:00+00:00"); for (int i = 0; i < reqs.length; i++) { - reqs[i] = client().prepareIndex("idx2") - .setId("" + i) - .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); + reqs[i] = prepareIndex("idx2").setId("" + i).setSource(jsonBuilder().startObject().timeField("date", date).endObject()); date = date.plusHours(1); } indexRandom(true, reqs); - SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery()) - .addAggregation( - dateHistogram("date_histo").field("date") - .timeZone(ZoneId.of("-02:00")) - .calendarInterval(DateHistogramInterval.DAY) - .format("yyyy-MM-dd:HH-mm-ssZZZZZ") - ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(5L)); - - Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(2)); - - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("2014-03-10:00-00-00-02:00")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("2014-03-11:00-00-00-02:00")); - assertThat(bucket.getDocCount(), equalTo(3L)); + assertNoFailuresAndResponse( + prepareSearch("idx2").setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date") + 
.timeZone(ZoneId.of("-02:00")) + .calendarInterval(DateHistogramInterval.DAY) + .format("yyyy-MM-dd:HH-mm-ssZZZZZ") + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(5L)); + + Histogram histo = response.getAggregations().get("date_histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("2014-03-10:00-00-00-02:00")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("2014-03-11:00-00-00-02:00")); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testSingleValueFieldWithExtendedBounds() throws Exception { @@ -1066,47 +1096,42 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { long[] extendedValueCounts = new long[bucketsCount]; System.arraycopy(docCounts, 0, extendedValueCounts, addedBucketsLeft, docCounts.length); - SearchResponse response = null; try { - response = prepareSearch("idx2").addAggregation( - dateHistogram("histo").field("date") - .fixedInterval(DateHistogramInterval.days(interval)) - .minDocCount(0) - // when explicitly specifying a format, the extended bounds should be defined by the same format - .extendedBounds(new LongBounds(format(boundsMin, pattern), format(boundsMax, pattern))) - .format(pattern) - ).get(); - - if (invalidBoundsError) { - fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); - return; - } - + assertNoFailuresAndResponse( + prepareSearch("idx2").addAggregation( + dateHistogram("histo").field("date") + .fixedInterval(DateHistogramInterval.days(interval)) + .minDocCount(0) + // when explicitly specifying a format, the extended bounds should be defined by the same format + .extendedBounds(new LongBounds(format(boundsMin, pattern), format(boundsMax, pattern))) + .format(pattern) + ), + response -> { + if (invalidBoundsError) { + fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); + } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(bucketsCount)); + + ZonedDateTime key = baseKey.isBefore(boundsMinKey) ? baseKey : boundsMinKey; + for (int i = 0; i < bucketsCount; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getKeyAsString(), equalTo(format(key, pattern))); + assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); + key = key.plusDays(interval); + } + } + ); } catch (Exception e) { - if (invalidBoundsError) { - // expected - return; - } else { + if (invalidBoundsError == false) { throw e; } } - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(bucketsCount)); - - ZonedDateTime key = baseKey.isBefore(boundsMinKey) ? 
baseKey : boundsMinKey; - for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getKeyAsString(), equalTo(format(key, pattern))); - assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); - key = key.plusDays(interval); - } } /** @@ -1133,45 +1158,47 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { indexRandom(true, builders); ensureSearchable(index); - SearchResponse response = null; // retrieve those docs with the same time zone and extended bounds - response = prepareSearch(index).setQuery( - QueryBuilders.rangeQuery("date").from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId()) - ) - .addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.hours(1)) - .timeZone(timezone) - .minDocCount(0) - .extendedBounds(new LongBounds("now/d", "now/d+23h")) + assertNoFailuresAndResponse( + prepareSearch(index).setQuery( + QueryBuilders.rangeQuery("date").from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId()) ) - .get(); - assertNoFailures(response); - - assertThat( - "Expected 24 buckets for one day aggregation with hourly interval", - response.getHits().getTotalHits().value, - equalTo(2L) - ); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(24)); - - for (int i = 0; i < buckets.size(); i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - ZonedDateTime zonedDateTime = timeZoneStartToday.plus(i * 60 * 60 * 1000, ChronoUnit.MILLIS); - assertThat("InternalBucket " + i + " had wrong key", (ZonedDateTime) bucket.getKey(), equalTo(zonedDateTime)); - if (i == 0 || i == 12) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(0L)); + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.hours(1)) + .timeZone(timezone) + .minDocCount(0) + .extendedBounds(new LongBounds("now/d", "now/d+23h")) + ), + response -> { + + assertThat( + "Expected 24 buckets for one day aggregation with hourly interval", + response.getHits().getTotalHits().value, + equalTo(2L) + ); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(24)); + + for (int i = 0; i < buckets.size(); i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + ZonedDateTime zonedDateTime = timeZoneStartToday.plus(i * 60 * 60 * 1000, ChronoUnit.MILLIS); + assertThat("InternalBucket " + i + " had wrong key", (ZonedDateTime) bucket.getKey(), equalTo(zonedDateTime)); + if (i == 0 || i == 12) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(0L)); + } + } } - } + ); internalCluster().wipeIndices(index); + } /** @@ -1193,40 +1220,40 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { indexRandom(true, builders); ensureSearchable(index); - SearchResponse response = null; // retrieve those docs with the same time zone and extended bounds - response = 
prepareSearch(index).addAggregation( - dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.days(1)) - .offset("+6h") - .minDocCount(0) - .extendedBounds(new LongBounds("2016-01-01T06:00:00Z", "2016-01-08T08:00:00Z")) - ).get(); - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(8)); - - assertEquals("2016-01-01T06:00:00.000Z", buckets.get(0).getKeyAsString()); - assertEquals(0, buckets.get(0).getDocCount()); - assertEquals("2016-01-02T06:00:00.000Z", buckets.get(1).getKeyAsString()); - assertEquals(0, buckets.get(1).getDocCount()); - assertEquals("2016-01-03T06:00:00.000Z", buckets.get(2).getKeyAsString()); - assertEquals(2, buckets.get(2).getDocCount()); - assertEquals("2016-01-04T06:00:00.000Z", buckets.get(3).getKeyAsString()); - assertEquals(0, buckets.get(3).getDocCount()); - assertEquals("2016-01-05T06:00:00.000Z", buckets.get(4).getKeyAsString()); - assertEquals(0, buckets.get(4).getDocCount()); - assertEquals("2016-01-06T06:00:00.000Z", buckets.get(5).getKeyAsString()); - assertEquals(2, buckets.get(5).getDocCount()); - assertEquals("2016-01-07T06:00:00.000Z", buckets.get(6).getKeyAsString()); - assertEquals(0, buckets.get(6).getDocCount()); - assertEquals("2016-01-08T06:00:00.000Z", buckets.get(7).getKeyAsString()); - assertEquals(0, buckets.get(7).getDocCount()); - + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.days(1)) + .offset("+6h") + .minDocCount(0) + .extendedBounds(new LongBounds("2016-01-01T06:00:00Z", "2016-01-08T08:00:00Z")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(8)); + + assertEquals("2016-01-01T06:00:00.000Z", buckets.get(0).getKeyAsString()); + assertEquals(0, buckets.get(0).getDocCount()); + assertEquals("2016-01-02T06:00:00.000Z", buckets.get(1).getKeyAsString()); + assertEquals(0, buckets.get(1).getDocCount()); + assertEquals("2016-01-03T06:00:00.000Z", buckets.get(2).getKeyAsString()); + assertEquals(2, buckets.get(2).getDocCount()); + assertEquals("2016-01-04T06:00:00.000Z", buckets.get(3).getKeyAsString()); + assertEquals(0, buckets.get(3).getDocCount()); + assertEquals("2016-01-05T06:00:00.000Z", buckets.get(4).getKeyAsString()); + assertEquals(0, buckets.get(4).getDocCount()); + assertEquals("2016-01-06T06:00:00.000Z", buckets.get(5).getKeyAsString()); + assertEquals(2, buckets.get(5).getDocCount()); + assertEquals("2016-01-07T06:00:00.000Z", buckets.get(6).getKeyAsString()); + assertEquals(0, buckets.get(6).getDocCount()); + assertEquals("2016-01-08T06:00:00.000Z", buckets.get(7).getKeyAsString()); + assertEquals(0, buckets.get(7).getDocCount()); + } + ); internalCluster().wipeIndices(index); } @@ -1244,116 +1271,128 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception prepareCreate("idx2").setMapping(mappingJson).get(); IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; for (int i = 0; i < reqs.length; i++) { - reqs[i] = client().prepareIndex("idx2") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("date", "10-03-2014").endObject()); + reqs[i] = prepareIndex("idx2").setId("" + 
i).setSource(jsonBuilder().startObject().field("date", "10-03-2014").endObject()); } indexRandom(true, reqs); - SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery()) - .addAggregation(dateHistogram("date_histo").field("date").calendarInterval(DateHistogramInterval.DAY)) - .get(); + assertNoFailuresAndResponse( + prepareSearch("idx2").setQuery(matchAllQuery()) + .addAggregation(dateHistogram("date_histo").field("date").calendarInterval(DateHistogramInterval.DAY)), + response -> { - assertSearchHits(response, "0", "1", "2", "3", "4"); + assertSearchHits(response, "0", "1", "2", "3", "4"); - Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(1)); + Histogram histo = response.getAggregations().get("date_histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(1)); - ZonedDateTime key = ZonedDateTime.of(2014, 3, 10, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(5L)); + ZonedDateTime key = ZonedDateTime.of(2014, 3, 10, 0, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5L)); + } + ); } public void testIssue6965() { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").timeZone(ZoneId.of("+01:00")).calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) - ).get(); - - assertNoFailures(response); - - ZoneId tz = ZoneId.of("+01:00"); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("+01:00")) + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + ), + response -> { + + ZoneId tz = ZoneId.of("+01:00"); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + ZonedDateTime key = ZonedDateTime.of(2011, 12, 31, 23, 0, 0, 0, ZoneOffset.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1L)); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - ZonedDateTime key = ZonedDateTime.of(2011, 12, 31, 23, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - key = ZonedDateTime.of(2012, 1, 31, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), 
equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2L)); - - key = ZonedDateTime.of(2012, 2, 29, 23, 0, 0, 0, ZoneOffset.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3L)); + key = ZonedDateTime.of(2012, 1, 31, 23, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2L)); + + key = ZonedDateTime.of(2012, 2, 29, 23, 0, 0, 0, ZoneOffset.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3L)); + } + ); } public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionException { assertAcked(indicesAdmin().prepareCreate("test9491").setMapping("d", "type=date").get()); indexRandom( true, - client().prepareIndex("test9491").setSource("d", "2014-10-08T13:00:00Z"), - client().prepareIndex("test9491").setSource("d", "2014-11-08T13:00:00Z") + prepareIndex("test9491").setSource("d", "2014-10-08T13:00:00Z"), + prepareIndex("test9491").setSource("d", "2014-11-08T13:00:00Z") ); ensureSearchable("test9491"); - SearchResponse response = prepareSearch("test9491").addAggregation( - dateHistogram("histo").field("d") - .calendarInterval(DateHistogramInterval.YEAR) - .timeZone(ZoneId.of("Asia/Jerusalem")) - .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") - ).get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+02:00")); - internalCluster().wipeIndices("test9491"); + assertNoFailuresAndResponse( + prepareSearch("test9491").addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.YEAR) + .timeZone(ZoneId.of("Asia/Jerusalem")) + .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(1)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+02:00")); + internalCluster().wipeIndices("test9491"); + } + ); } public void testIssue8209() throws InterruptedException, ExecutionException { assertAcked(indicesAdmin().prepareCreate("test8209").setMapping("d", "type=date").get()); indexRandom( true, - client().prepareIndex("test8209").setSource("d", "2014-01-01T00:00:00Z"), - client().prepareIndex("test8209").setSource("d", "2014-04-01T00:00:00Z"), - client().prepareIndex("test8209").setSource("d", "2014-04-30T00:00:00Z") + prepareIndex("test8209").setSource("d", "2014-01-01T00:00:00Z"), + prepareIndex("test8209").setSource("d", "2014-04-01T00:00:00Z"), + prepareIndex("test8209").setSource("d", "2014-04-30T00:00:00Z") ); ensureSearchable("test8209"); - SearchResponse response = prepareSearch("test8209").addAggregation( - dateHistogram("histo").field("d") - .calendarInterval(DateHistogramInterval.MONTH) - 
.format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") - .timeZone(ZoneId.of("CET")) - .minDocCount(0) - ).get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+01:00")); - assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); - assertThat(histo.getBuckets().get(1).getKeyAsString(), equalTo("2014-02-01T00:00:00.000+01:00")); - assertThat(histo.getBuckets().get(1).getDocCount(), equalTo(0L)); - assertThat(histo.getBuckets().get(2).getKeyAsString(), equalTo("2014-03-01T00:00:00.000+01:00")); - assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(0L)); - assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("2014-04-01T00:00:00.000+02:00")); - assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(2L)); - internalCluster().wipeIndices("test8209"); + assertNoFailuresAndResponse( + prepareSearch("test8209").addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.MONTH) + .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") + .timeZone(ZoneId.of("CET")) + .minDocCount(0) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+01:00")); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); + assertThat(histo.getBuckets().get(1).getKeyAsString(), equalTo("2014-02-01T00:00:00.000+01:00")); + assertThat(histo.getBuckets().get(1).getDocCount(), equalTo(0L)); + assertThat(histo.getBuckets().get(2).getKeyAsString(), equalTo("2014-03-01T00:00:00.000+01:00")); + assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(0L)); + assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("2014-04-01T00:00:00.000+02:00")); + assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(2L)); + internalCluster().wipeIndices("test8209"); + } + ); } // TODO: add some tests for negative fixed and calendar intervals @@ -1367,22 +1406,25 @@ public void testIssue8209() throws InterruptedException, ExecutionException { */ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionException { String indexDateUnmapped = "test31760"; - indexRandom(true, client().prepareIndex(indexDateUnmapped).setSource("foo", "bar")); + indexRandom(true, prepareIndex(indexDateUnmapped).setSource("foo", "bar")); ensureSearchable(indexDateUnmapped); - SearchResponse response = prepareSearch(indexDateUnmapped).addAggregation( - dateHistogram("histo").field("dateField") - .calendarInterval(DateHistogramInterval.MONTH) - .format("yyyy-MM") - .minDocCount(0) - .extendedBounds(new LongBounds("2018-01", "2018-01")) - ).get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2018-01")); - assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(0L)); - internalCluster().wipeIndices(indexDateUnmapped); + assertNoFailuresAndResponse( + prepareSearch(indexDateUnmapped).addAggregation( + dateHistogram("histo").field("dateField") + .calendarInterval(DateHistogramInterval.MONTH) + .format("yyyy-MM") + .minDocCount(0) + .extendedBounds(new LongBounds("2018-01", "2018-01")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + 
assertThat(histo.getBuckets().size(), equalTo(1)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2018-01")); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(0L)); + internalCluster().wipeIndices(indexDateUnmapped); + } + ); } /** @@ -1393,29 +1435,33 @@ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionExce public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, ExecutionException { String index = "test31392"; assertAcked(indicesAdmin().prepareCreate(index).setMapping("d", "type=date,format=epoch_millis").get()); - indexRandom(true, client().prepareIndex(index).setSource("d", "1477954800000")); + indexRandom(true, prepareIndex(index).setSource("d", "1477954800000")); ensureSearchable(index); - SearchResponse response = prepareSearch(index).addAggregation( - dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")) - ).get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); - assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); - - response = prepareSearch(index).addAggregation( - dateHistogram("histo").field("d") - .calendarInterval(DateHistogramInterval.MONTH) - .timeZone(ZoneId.of("Europe/Berlin")) - .format("yyyy-MM-dd") - ).get(); - assertNoFailures(response); - histo = response.getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2016-11-01")); - assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); - + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(1)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); + } + ); + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.MONTH) + .timeZone(ZoneId.of("Europe/Berlin")) + .format("yyyy-MM-dd") + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(1)); + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2016-11-01")); + assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); + } + ); internalCluster().wipeIndices(index); } @@ -1427,62 +1473,73 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, * "2015-10-25T04:00:00.000+01:00". 
*/ public void testDSTEndTransition() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(new MatchNoneQueryBuilder()) - .addAggregation( - dateHistogram("histo").field("date") - .timeZone(ZoneId.of("Europe/Oslo")) - .calendarInterval(DateHistogramInterval.HOUR) - .minDocCount(0) - .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")) - ) - .get(); - - Histogram histo = response.getAggregations().get("histo"); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - assertThat( - ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) - ); - assertThat( - ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) - ); - assertThat( - ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(new MatchNoneQueryBuilder()) + .addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("Europe/Oslo")) + .calendarInterval(DateHistogramInterval.HOUR) + .minDocCount(0) + .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")) + ), + response -> { + + Histogram histo = response.getAggregations().get("histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + assertThat( + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + } ); - response = prepareSearch("idx").setQuery(new MatchNoneQueryBuilder()) - .addAggregation( - dateHistogram("histo").field("date") - .timeZone(ZoneId.of("Europe/Oslo")) - .calendarInterval(DateHistogramInterval.HOUR) - .minDocCount(0) - .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")) - ) - .get(); - - histo = response.getAggregations().get("histo"); - buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(4)); - assertThat( - ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) - ); - assertThat( - ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) - ); - assertThat( - ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()).toInstant() - .toEpochMilli(), - equalTo(3600000L) + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(new MatchNoneQueryBuilder()) + .addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("Europe/Oslo")) + .calendarInterval(DateHistogramInterval.HOUR) + .minDocCount(0) + .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", 
"2015-10-25T04:00:00.000+01:00")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + assertThat( + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()) + .toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + } ); } @@ -1499,8 +1556,8 @@ public void testScriptCaching() throws Exception { String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("d", date), - client().prepareIndex("cache_test_idx").setId("2").setSource("d", date2) + prepareIndex("cache_test_idx").setId("1").setSource("d", date), + prepareIndex("cache_test_idx").setId("2").setSource("d", date2) ); // Make sure we are starting with a clear cache @@ -1516,14 +1573,14 @@ public void testScriptCaching() throws Exception { // Test that a request using a nondeterministic script does not get cached Map params = new HashMap<>(); params.put("fieldname", "d"); - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - dateHistogram("histo").field("d") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + dateHistogram("histo").field("d") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1535,14 +1592,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - dateHistogram("histo").field("d") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .calendarInterval(DateHistogramInterval.MONTH) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + dateHistogram("histo").field("d") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1554,10 +1611,10 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH)) + ); assertThat( 
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -1535,14 +1592,14 @@
         );

         // Test that a request using a deterministic script gets cached
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                dateHistogram("histo").field("d")
-                    .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params))
-                    .calendarInterval(DateHistogramInterval.MONTH)
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    dateHistogram("histo").field("d")
+                        .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params))
+                        .calendarInterval(DateHistogramInterval.MONTH)
+                )
+        );

         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -1554,10 +1611,10 @@
         );

         // Ensure that non-scripted requests are cached as normal
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH))
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH))
+        );

         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -1611,34 +1668,35 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound

     private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) {
         ZonedDateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(ZonedDateTime[]::new);
-        SearchResponse response = prepareSearch("sort_idx").addAggregation(
-            dateHistogram("histo").field("date")
-                .calendarInterval(DateHistogramInterval.DAY)
-                .order(BucketOrder.compound(order))
-                .subAggregation(avg("avg_l").field("l"))
-                .subAggregation(sum("sum_d").field("d"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histogram = response.getAggregations().get("histo");
-        assertThat(histogram, notNullValue());
-        assertThat(histogram.getName(), equalTo("histo"));
-        assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length));
-
-        int i = 0;
-        for (Histogram.Bucket bucket : histogram.getBuckets()) {
-            assertThat(bucket, notNullValue());
-            assertThat(key(bucket), equalTo(expectedKeys[i]));
-            assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
-            Avg avg = bucket.getAggregations().get("avg_l");
-            assertThat(avg, notNullValue());
-            assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
-            Sum sum = bucket.getAggregations().get("sum_d");
-            assertThat(sum, notNullValue());
-            assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
-            i++;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("sort_idx").addAggregation(
+                dateHistogram("histo").field("date")
+                    .calendarInterval(DateHistogramInterval.DAY)
+                    .order(BucketOrder.compound(order))
+                    .subAggregation(avg("avg_l").field("l"))
+                    .subAggregation(sum("sum_d").field("d"))
+            ),
+            response -> {
+                Histogram histogram = response.getAggregations().get("histo");
+                assertThat(histogram, notNullValue());
+                assertThat(histogram.getName(), equalTo("histo"));
+                assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length));
+
+                int i = 0;
+                for (Histogram.Bucket bucket : histogram.getBuckets()) {
+                    assertThat(bucket, notNullValue());
+                    assertThat(key(bucket), equalTo(expectedKeys[i]));
+                    assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
+                    Avg avg = bucket.getAggregations().get("avg_l");
+                    assertThat(avg, notNullValue());
+                    assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
+                    Sum sum = bucket.getAggregations().get("sum_d");
+                    assertThat(sum, notNullValue());
+                    assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
+                    i++;
+                }
+            }
+        );
     }

     private ZonedDateTime key(Histogram.Bucket bucket) {
@@ -1651,67 +1709,72 @@ private ZonedDateTime key(Histogram.Bucket bucket) {
      */
     public void testDateNanosHistogram() throws Exception {
         assertAcked(prepareCreate("nanos").setMapping("date", "type=date_nanos").get());
-        indexRandom(true, client().prepareIndex("nanos").setId("1").setSource("date", "2000-01-01"));
-        indexRandom(true, client().prepareIndex("nanos").setId("2").setSource("date", "2000-01-02"));
+        indexRandom(true, prepareIndex("nanos").setId("1").setSource("date", "2000-01-01"));
+        indexRandom(true, prepareIndex("nanos").setId("2").setSource("date", "2000-01-02"));
         // Search interval 24 hours
-        SearchResponse r = prepareSearch("nanos").addAggregation(
dateHistogram("histo").field("date") - .fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)) - .timeZone(ZoneId.of("Europe/Berlin")) - ).addDocValueField("date").get(); - assertNoFailures(r); - - Histogram histogram = r.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertEquals(2, buckets.size()); - assertEquals(946681200000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); - assertEquals(1, buckets.get(0).getDocCount()); - assertEquals(946767600000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); - assertEquals(1, buckets.get(1).getDocCount()); - - r = prepareSearch("nanos").addAggregation( - dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("UTC")) - ).addDocValueField("date").get(); - assertNoFailures(r); - - histogram = r.getAggregations().get("histo"); - buckets = histogram.getBuckets(); - assertEquals(2, buckets.size()); - assertEquals(946684800000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); - assertEquals(1, buckets.get(0).getDocCount()); - assertEquals(946771200000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); - assertEquals(1, buckets.get(1).getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("nanos").addAggregation( + dateHistogram("histo").field("date") + .fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)) + .timeZone(ZoneId.of("Europe/Berlin")) + ).addDocValueField("date"), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + assertEquals(946681200000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(0).getDocCount()); + assertEquals(946767600000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(1).getDocCount()); + } + ); + + assertNoFailuresAndResponse( + prepareSearch("nanos").addAggregation( + dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("UTC")) + ).addDocValueField("date"), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + assertEquals(946684800000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(0).getDocCount()); + assertEquals(946771200000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(1).getDocCount()); + } + ); } public void testDateKeyFormatting() { - SearchResponse response = prepareSearch("idx").addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("America/Edmonton")) - ).get(); - - assertNoFailures(response); - - InternalDateHistogram histogram = response.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertThat(buckets.get(0).getKeyAsString(), equalTo("2012-01-01T00:00:00.000-07:00")); - assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-01T00:00:00.000-07:00")); - assertThat(buckets.get(2).getKeyAsString(), equalTo("2012-03-01T00:00:00.000-07:00")); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("America/Edmonton")) + ), + response -> { + 
+                InternalDateHistogram histogram = response.getAggregations().get("histo");
+                List<InternalDateHistogram.Bucket> buckets = histogram.getBuckets();
+                assertThat(buckets.get(0).getKeyAsString(), equalTo("2012-01-01T00:00:00.000-07:00"));
+                assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-01T00:00:00.000-07:00"));
+                assertThat(buckets.get(2).getKeyAsString(), equalTo("2012-03-01T00:00:00.000-07:00"));
+            }
+        );
     }

     public void testHardBoundsOnDates() {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateHistogram("histo").field("date")
-                .calendarInterval(DateHistogramInterval.DAY)
-                .hardBounds(new LongBounds("2012-02-01T00:00:00.000", "2012-03-03T00:00:00.000"))
-        ).get();
-
-        assertNoFailures(response);
-
-        InternalDateHistogram histogram = response.getAggregations().get("histo");
-        List<InternalDateHistogram.Bucket> buckets = histogram.getBuckets();
-        assertThat(buckets.size(), equalTo(30));
-        assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-03T00:00:00.000Z"));
-        assertThat(buckets.get(29).getKeyAsString(), equalTo("2012-03-02T00:00:00.000Z"));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateHistogram("histo").field("date")
+                    .calendarInterval(DateHistogramInterval.DAY)
+                    .hardBounds(new LongBounds("2012-02-01T00:00:00.000", "2012-03-03T00:00:00.000"))
+            ),
+            response -> {
+                InternalDateHistogram histogram = response.getAggregations().get("histo");
+                List<InternalDateHistogram.Bucket> buckets = histogram.getBuckets();
+                assertThat(buckets.size(), equalTo(30));
+                assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-03T00:00:00.000Z"));
+                assertThat(buckets.get(29).getKeyAsString(), equalTo("2012-03-02T00:00:00.000Z"));
+            }
+        );
     }
-
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java
index 64c1a7c8859fc..5abf52cf37f88 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.aggregations.bucket;

 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.common.time.DateFormatters;
 import org.elasticsearch.index.mapper.DateFieldMapper;
@@ -25,6 +24,7 @@

 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.core.IsNull.notNullValue;
@@ -61,8 +61,7 @@ private void prepareIndex(ZonedDateTime date, int numHours, int stepSizeHours, i
         IndexRequestBuilder[] reqs = new IndexRequestBuilder[numHours];
         for (int i = idxIdStart; i < idxIdStart + reqs.length; i++) {
-            reqs[i - idxIdStart] = client().prepareIndex("idx2")
-                .setId("" + i)
+            reqs[i - idxIdStart] = prepareIndex("idx2").setId("" + i)
                 .setSource(jsonBuilder().startObject().timeField("date", date).endObject());
             date = date.plusHours(stepSizeHours);
         }
@@ -72,39 +71,43 @@ private void prepareIndex(ZonedDateTime date, int numHours, int stepSizeHours, i
     public void testSingleValueWithPositiveOffset() throws Exception {
         prepareIndex(date("2014-03-11T00:00:00+00:00"), 5, 1, 0);

-        SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery())
-            .addAggregation(
-                dateHistogram("date_histo").field("date").offset("2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY)
-            )
-            .get();
-
-        assertThat(response.getHits().getTotalHits().value, equalTo(5L));
-
-        Histogram histo = response.getAggregations().get("date_histo");
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(2));
-
-        checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 2, 0, 0, 0, ZoneOffset.UTC), 2L);
-        checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 2, 0, 0, 0, ZoneOffset.UTC), 3L);
+        assertResponse(
+            prepareSearch("idx2").setQuery(matchAllQuery())
+                .addAggregation(
+                    dateHistogram("date_histo").field("date").offset("2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY)
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(5L));
+
+                Histogram histo = response.getAggregations().get("date_histo");
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(2));
+
+                checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 2, 0, 0, 0, ZoneOffset.UTC), 2L);
+                checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 2, 0, 0, 0, ZoneOffset.UTC), 3L);
+            }
+        );
     }

     public void testSingleValueWithNegativeOffset() throws Exception {
         prepareIndex(date("2014-03-11T00:00:00+00:00"), 5, -1, 0);

-        SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery())
-            .addAggregation(
-                dateHistogram("date_histo").field("date").offset("-2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY)
-            )
-            .get();
-
-        assertThat(response.getHits().getTotalHits().value, equalTo(5L));
-
-        Histogram histo = response.getAggregations().get("date_histo");
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(2));
-
-        checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 9, 22, 0, 0, 0, ZoneOffset.UTC), 2L);
-        checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 10, 22, 0, 0, 0, ZoneOffset.UTC), 3L);
+        assertResponse(
+            prepareSearch("idx2").setQuery(matchAllQuery())
+                .addAggregation(
+                    dateHistogram("date_histo").field("date").offset("-2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY)
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(5L));
+
+                Histogram histo = response.getAggregations().get("date_histo");
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(2));
+
+                checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 9, 22, 0, 0, 0, ZoneOffset.UTC), 2L);
+                checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 10, 22, 0, 0, 0, ZoneOffset.UTC), 3L);
+            }
+        );
     }

     /**
@@ -114,27 +117,29 @@ public void testSingleValueWithOffsetMinDocCount() throws Exception {
         prepareIndex(date("2014-03-11T00:00:00+00:00"), 12, 1, 0);
         prepareIndex(date("2014-03-14T00:00:00+00:00"), 12, 1, 13);

-        SearchResponse response = prepareSearch("idx2").setQuery(matchAllQuery())
-            .addAggregation(
-                dateHistogram("date_histo").field("date")
-                    .offset("6h")
-                    .minDocCount(0)
-                    .format(DATE_FORMAT)
-                    .fixedInterval(DateHistogramInterval.DAY)
-            )
-            .get();
-
-        assertThat(response.getHits().getTotalHits().value, equalTo(24L));
-
-        Histogram histo = response.getAggregations().get("date_histo");
-        List<? extends Histogram.Bucket> buckets = histo.getBuckets();
-        assertThat(buckets.size(), equalTo(5));
-
-        checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 6, 0, 0, 0, ZoneOffset.UTC), 6L);
-        checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 6, 0, 0, 0, ZoneOffset.UTC), 6L);
-        checkBucketFor(buckets.get(2), ZonedDateTime.of(2014, 3, 12, 6, 0, 0, 0, ZoneOffset.UTC), 0L);
-        checkBucketFor(buckets.get(3), ZonedDateTime.of(2014, 3, 13, 6, 0, 0, 0, ZoneOffset.UTC), 6L);
-        checkBucketFor(buckets.get(4), ZonedDateTime.of(2014, 3, 14, 6, 0, 0, 0, ZoneOffset.UTC), 6L);
+        assertResponse(
+            prepareSearch("idx2").setQuery(matchAllQuery())
+                .addAggregation(
+                    dateHistogram("date_histo").field("date")
+                        .offset("6h")
+                        .minDocCount(0)
+                        .format(DATE_FORMAT)
+                        .fixedInterval(DateHistogramInterval.DAY)
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(24L));
+
+                Histogram histo = response.getAggregations().get("date_histo");
+                List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+                assertThat(buckets.size(), equalTo(5));
+
+                checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 6, 0, 0, 0, ZoneOffset.UTC), 6L);
+                checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 6, 0, 0, 0, ZoneOffset.UTC), 6L);
+                checkBucketFor(buckets.get(2), ZonedDateTime.of(2014, 3, 12, 6, 0, 0, 0, ZoneOffset.UTC), 0L);
+                checkBucketFor(buckets.get(3), ZonedDateTime.of(2014, 3, 13, 6, 0, 0, 0, ZoneOffset.UTC), 6L);
+                checkBucketFor(buckets.get(4), ZonedDateTime.of(2014, 3, 14, 6, 0, 0, 0, ZoneOffset.UTC), 6L);
+            }
+        );
     }

     /**
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
index 44b0ff05ea274..0a726fcec5a88 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
@@ -10,7 +10,6 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.Script;
@@ -43,6 +42,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -55,17 +55,16 @@ public class DateRangeIT extends ESIntegTestCase {

     private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
-        return client().prepareIndex("idx")
-            .setSource(
-                jsonBuilder().startObject()
-                    .field("value", value)
-                    .timeField("date", date(month, day))
-                    .startArray("dates")
-                    .timeValue(date(month, day))
-                    .timeValue(date(month + 1, day + 1))
-                    .endArray()
-                    .endObject()
-            );
+        return prepareIndex("idx").setSource(
+            jsonBuilder().startObject()
+                .field("value", value)
+                .timeField("date", date(month, day))
+                .startArray("dates")
+                .timeValue(date(month, day))
+                .timeValue(date(month + 1, day + 1))
+                .endArray()
+                .endObject()
+        );
     }

     private static ZonedDateTime date(int month, int day) {
@@ -104,9 +103,7 @@ public void setupSuiteScopeCluster() throws Exception {
         assertAcked(prepareCreate("empty_bucket_idx").setMapping("value", "type=integer"));
         for (int i = 0; i < 2; i++) {
             docs.add(
-                client().prepareIndex("empty_bucket_idx")
-                    .setId("" + i)
-                    .setSource(jsonBuilder().startObject().field("value", i * 2).endObject())
+                prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject())
             );
         }
         indexRandom(true, docs);
@@ -127,166 +124,173 @@ public void testDateMath() throws Exception {
         } else {
             rangeBuilder.script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params));
         }
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            rangeBuilder.addUnboundedTo("a long time ago", "now-50y")
-                .addRange("recently", "now-50y", "now-1y")
-                .addUnboundedFrom("last year", "now-1y")
-                .timeZone(ZoneId.of("Etc/GMT+5"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        List<Range.Bucket> buckets = new ArrayList<>(range.getBuckets());
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat((String) bucket.getKey(), equalTo("a long time ago"));
-        assertThat(bucket.getKeyAsString(), equalTo("a long time ago"));
-        assertThat(bucket.getDocCount(), equalTo(0L));
-
-        bucket = buckets.get(1);
-        assertThat((String) bucket.getKey(), equalTo("recently"));
-        assertThat(bucket.getKeyAsString(), equalTo("recently"));
-        assertThat(bucket.getDocCount(), equalTo((long) numDocs));
-
-        bucket = buckets.get(2);
-        assertThat((String) bucket.getKey(), equalTo("last year"));
-        assertThat(bucket.getKeyAsString(), equalTo("last year"));
-        assertThat(bucket.getDocCount(), equalTo(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                rangeBuilder.addUnboundedTo("a long time ago", "now-50y")
+                    .addRange("recently", "now-50y", "now-1y")
+                    .addUnboundedFrom("last year", "now-1y")
+                    .timeZone(ZoneId.of("Etc/GMT+5"))
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                List<Range.Bucket> buckets = new ArrayList<>(range.getBuckets());
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat((String) bucket.getKey(), equalTo("a long time ago"));
+                assertThat(bucket.getKeyAsString(), equalTo("a long time ago"));
+                assertThat(bucket.getDocCount(), equalTo(0L));
+
+                bucket = buckets.get(1);
+                assertThat((String) bucket.getKey(), equalTo("recently"));
+                assertThat(bucket.getKeyAsString(), equalTo("recently"));
+                assertThat(bucket.getDocCount(), equalTo((long) numDocs));
+
+                bucket = buckets.get(2);
+                assertThat((String) bucket.getKey(), equalTo("last year"));
+                assertThat(bucket.getKeyAsString(), equalTo("last year"));
+                assertThat(bucket.getDocCount(), equalTo(0L));
+            }
+        );
     }

     public void testSingleValueField() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateRange("range").field("date").addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
-        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
-        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateRange("range").field("date")
+                    .addUnboundedTo(date(2, 15))
+                    .addRange(date(2, 15), date(3, 15))
+                    .addUnboundedFrom(date(3, 15))
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
+                assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
+                assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+            }
+        );
     }

     public void testSingleValueFieldWithStringDates() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateRange("range").field("date")
-                .addUnboundedTo("2012-02-15")
.addRange("2012-02-15", "2012-03-15") - .addUnboundedFrom("2012-03-15") - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("date") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), 
equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + } + ); } public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - dateRange("range").field("date") - .format("yyyy-MM-dd") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-03-15") - .addUnboundedFrom("2012-03-15") - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15")); - assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15-2012-03-15")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15-*")); - assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + dateRange("range").field("date") + .format("yyyy-MM-dd") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15")); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("2012-02-15")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("2012-02-15-2012-03-15")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15")); + assertThat(bucket.getToAsString(), equalTo("2012-03-15")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) 
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
+                assertThat(bucket.getFromAsString(), equalTo("2012-03-15"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+            }
+        );
     }

     public void testSingleValueFieldWithDateMath() throws Exception {
@@ -297,92 +301,97 @@ public void testSingleValueFieldWithDateMath() throws Exception {
         String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3, 15, timezone).format(DateTimeFormatter.ofPattern("xxx", Locale.ROOT));
         long expectedFirstBucketCount = timeZoneOffset < 0 ? 3L : 2L;

-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateRange("range").field("date")
-                .addUnboundedTo("2012-02-15")
-                .addRange("2012-02-15", "2012-02-15||+1M")
-                .addUnboundedFrom("2012-02-15||+1M")
-                .timeZone(timezone)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix));
-        assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC)));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix));
-        assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + "-2012-03-15T00:00:00.000" + mar15Suffix));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC)));
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC)));
-        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix));
-        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC)));
-        assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
-        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 2L - expectedFirstBucketCount));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateRange("range").field("date")
+                    .addUnboundedTo("2012-02-15")
+                    .addRange("2012-02-15", "2012-02-15||+1M")
+                    .addUnboundedFrom("2012-02-15||+1M")
+                    .timeZone(timezone)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix));
+                assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC)));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix));
+                assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(
+                    (String) bucket.getKey(),
+                    equalTo("2012-02-15T00:00:00.000" + feb15Suffix + "-2012-03-15T00:00:00.000" + mar15Suffix)
+                );
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC)));
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC)));
+                assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix));
+                assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC)));
+                assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
+                assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 2L - expectedFirstBucketCount));
+            }
+        );
     }

     public void testSingleValueFieldWithCustomKey() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateRange("range").field("date")
-                .addUnboundedTo("r1", date(2, 15))
-                .addRange("r2", date(2, 15), date(3, 15))
-                .addUnboundedFrom("r3", date(3, 15))
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("r1"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("r2"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
-        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("r3"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
-        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateRange("range").field("date")
+                    .addUnboundedTo("r1", date(2, 15))
+                    .addRange("r2", date(2, 15), date(3, 15))
+                    .addUnboundedFrom("r3", date(3, 15))
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("r1"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("r2"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
+                assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("r3"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
+                assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+            }
+        );
     }

     /*
@@ -395,68 +404,69 @@ public void testSingleValueFieldWithCustomKey() throws Exception {
      */
     public void testSingleValuedFieldWithSubAggregation() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateRange("range").field("date")
-                .addUnboundedTo("r1", date(2, 15))
-                .addRange("r2", date(2, 15), date(3, 15))
-                .addUnboundedFrom("r3", date(3, 15))
-                .subAggregation(sum("sum").field("value"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        assertThat(((InternalAggregation) range).getProperty("_bucket_count"), equalTo(3));
-        Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key");
-        Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count");
-        Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value");
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("r1"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-        Sum sum = bucket.getAggregations().get("sum");
-        assertThat(sum, notNullValue());
-        assertThat(sum.value(), equalTo((double) 1 + 2));
-        assertThat((String) propertiesKeys[0], equalTo("r1"));
-        assertThat((long) propertiesDocCounts[0], equalTo(2L));
-        assertThat((double) propertiesCounts[0], equalTo((double) 1 + 2));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("r2"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
-        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-        sum = bucket.getAggregations().get("sum");
-        assertThat(sum, notNullValue());
-        assertThat(sum.value(), equalTo((double) 3 + 4));
-        assertThat((String) propertiesKeys[1], equalTo("r2"));
-        assertThat((long) propertiesDocCounts[1], equalTo(2L));
-        assertThat((double) propertiesCounts[1], equalTo((double) 3 + 4));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("r3"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
-        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
-        sum = bucket.getAggregations().get("sum");
-        assertThat(sum, notNullValue());
-        assertThat((String) propertiesKeys[2], equalTo("r3"));
-        assertThat((long) propertiesDocCounts[2], equalTo(numDocs - 4L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateRange("range").field("date")
+                    .addUnboundedTo("r1", date(2, 15))
+                    .addRange("r2", date(2, 15), date(3, 15))
+                    .addUnboundedFrom("r3", date(3, 15))
+                    .subAggregation(sum("sum").field("value"))
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                assertThat(((InternalAggregation) range).getProperty("_bucket_count"), equalTo(3));
+                Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key");
+                Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count");
+                Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value");
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("r1"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+                Sum sum = bucket.getAggregations().get("sum");
+                assertThat(sum, notNullValue());
+                assertThat(sum.value(), equalTo((double) 1 + 2));
+                assertThat((String) propertiesKeys[0], equalTo("r1"));
+                assertThat((long) propertiesDocCounts[0], equalTo(2L));
+                assertThat((double) propertiesCounts[0], equalTo((double) 1 + 2));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("r2"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
+                assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+                sum = bucket.getAggregations().get("sum");
+                assertThat(sum, notNullValue());
+                assertThat(sum.value(), equalTo((double) 3 + 4));
+                assertThat((String) propertiesKeys[1], equalTo("r2"));
+                assertThat((long) propertiesDocCounts[1], equalTo(2L));
+                assertThat((double) propertiesCounts[1], equalTo((double) 3 + 4));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("r3"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
+                assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+                sum = bucket.getAggregations().get("sum");
+                assertThat(sum, notNullValue());
+                assertThat((String) propertiesKeys[2], equalTo("r3"));
+                assertThat((long) propertiesDocCounts[2], equalTo(numDocs - 4L));
+            }
+        );
     }

     /*
@@ -469,113 +479,123 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception {
      */
     public void testMultiValuedField() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            dateRange("range").field("dates").addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
-        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(3L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
-        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                dateRange("range").field("dates")
+                    .addUnboundedTo(date(2, 15))
+                    .addRange(date(2, 15), date(3, 15))
+                    .addUnboundedFrom(date(3, 15))
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
+                assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
+                assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 2L));
+            }
+        );
     }

     public void testPartiallyUnmapped() throws Exception {
-        SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation(
-            dateRange("range").field("date").addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
-        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
-        assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
-        assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
-        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx", "idx_unmapped").addAggregation(
+                dateRange("range").field("date")
+                    .addUnboundedTo(date(2, 15))
+                    .addRange(date(2, 15), date(3, 15))
+                    .addUnboundedFrom(date(3, 15))
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), nullValue());
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15)));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15)));
+                assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+                assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15)));
+                assertThat(((ZonedDateTime) bucket.getTo()), nullValue());
+                assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+            }
+        );
     }

     public void testEmptyAggregation() throws Exception {
-        SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery())
-            .addAggregation(
-                histogram("histo").field("value")
-                    .interval(1L)
-                    .minDocCount(0)
-                    .subAggregation(dateRange("date_range").field("value").addRange("0-1", 0, 1))
-            )
-            .get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L));
-        Histogram histo = searchResponse.getAggregations().get("histo");
-        assertThat(histo, Matchers.notNullValue());
-        Histogram.Bucket bucket = histo.getBuckets().get(1);
-        assertThat(bucket, Matchers.notNullValue());
-
-        Range dateRange = bucket.getAggregations().get("date_range");
-        List<Range.Bucket> buckets = new ArrayList<>(dateRange.getBuckets());
-        assertThat(dateRange, Matchers.notNullValue());
-        assertThat(dateRange.getName(), equalTo("date_range"));
-        assertThat(buckets.size(), is(1));
-        assertThat((String) buckets.get(0).getKey(), equalTo("0-1"));
-        assertThat(((ZonedDateTime) buckets.get(0).getFrom()).toInstant().toEpochMilli(), equalTo(0L));
-        assertThat(((ZonedDateTime) buckets.get(0).getTo()).toInstant().toEpochMilli(), equalTo(1L));
-        assertThat(buckets.get(0).getDocCount(), equalTo(0L));
-        assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true));
+        assertNoFailuresAndResponse(
+            prepareSearch("empty_bucket_idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    histogram("histo").field("value")
+                        .interval(1L)
+                        .minDocCount(0)
+                        .subAggregation(dateRange("date_range").field("value").addRange("0-1", 0, 1))
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(2L));
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, Matchers.notNullValue());
+                Histogram.Bucket bucket = histo.getBuckets().get(1);
+                assertThat(bucket, Matchers.notNullValue());
+
+                Range dateRange = bucket.getAggregations().get("date_range");
+                List<Range.Bucket> buckets = new ArrayList<>(dateRange.getBuckets());
+                assertThat(dateRange, Matchers.notNullValue());
+                assertThat(dateRange.getName(), equalTo("date_range"));
+                assertThat(buckets.size(), is(1));
+                assertThat((String) buckets.get(0).getKey(), equalTo("0-1"));
+                assertThat(((ZonedDateTime) buckets.get(0).getFrom()).toInstant().toEpochMilli(), equalTo(0L));
+                assertThat(((ZonedDateTime) buckets.get(0).getTo()).toInstant().toEpochMilli(), equalTo(1L));
+                assertThat(buckets.get(0).getDocCount(), equalTo(0L));
+                assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true));
+            }
+        );
    }

     public void testNoRangesInQuery() {
@@ -600,12 +620,8 @@ public void testScriptCaching() throws Exception {
         );
         indexRandom(
             true,
-            client().prepareIndex("cache_test_idx")
-                .setId("1")
-                .setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).endObject()),
-            client().prepareIndex("cache_test_idx")
-                .setId("2")
-                .setSource(jsonBuilder().startObject().timeField("date", date(2, 1)).endObject())
+            prepareIndex("cache_test_idx").setId("1").setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).endObject()),
+            prepareIndex("cache_test_idx").setId("2").setSource(jsonBuilder().startObject().timeField("date", date(2, 1)).endObject())
         );

         // Make sure we are starting with a clear cache
@@ -621,17 +637,17 @@
         // Test that a request using a nondeterministic script does not get cached
         Map<String, Object> params = new HashMap<>();
         params.put("fieldname", "date");
-        SearchResponse r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                dateRange("foo").field("date")
-                    .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params))
-                    .addRange(
-                        ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
-                        ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)
-                    )
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    dateRange("foo").field("date")
+                        .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params))
+                        .addRange(
+                            ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
+                            ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)
+                        )
+                )
+        );

         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -643,17 +659,17 @@
         );

         // Test that a request using a deterministic script gets cached
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                dateRange("foo").field("date")
-                    .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params))
-                    .addRange(
-                        ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
-                        ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)
-                    )
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    dateRange("foo").field("date")
+                        .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params))
+                        .addRange(
+                            ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
+                            ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
+                            ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)
+                        )
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -665,16 +681,16 @@ public void testScriptCaching() throws Exception {
         );
 
         // Ensure that non-scripted requests are cached as normal
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                dateRange("foo").field("date")
-                    .addRange(
-                        ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
-                        ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)
-                    )
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    dateRange("foo").field("date")
+                        .addRange(
+                            ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC),
+                            ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)
+                        )
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -695,45 +711,54 @@ public void testRangeWithFormatStringValue() throws Exception {
         assertAcked(prepareCreate(indexName).setMapping("date", "type=date,format=strict_hour_minute_second"));
         indexRandom(
             true,
-            client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", "00:16:40").endObject()),
-            client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", "00:33:20").endObject()),
-            client().prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", "00:50:00").endObject())
+            prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", "00:16:40").endObject()),
+            prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", "00:33:20").endObject()),
+            prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", "00:50:00").endObject())
        );
 
         // using no format should work when to/from is compatible with format in
         // mapping
-        SearchResponse searchResponse = prepareSearch(indexName).setSize(0)
-            .addAggregation(dateRange("date_range").field("date").addRange("00:16:40", "00:50:00").addRange("00:50:00", "01:06:40"))
-            .get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        List<Range.Bucket> buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
-        assertBucket(buckets.get(0), 2L, "00:16:40-00:50:00", 1000000L, 3000000L);
-        assertBucket(buckets.get(1), 1L, "00:50:00-01:06:40", 3000000L, 4000000L);
-
+        assertNoFailuresAndResponse(
+            prepareSearch(indexName).setSize(0)
+                .addAggregation(dateRange("date_range").field("date").addRange("00:16:40", "00:50:00").addRange("00:50:00", "01:06:40")),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                List<Range.Bucket> buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2);
+                assertBucket(buckets.get(0), 2L, "00:16:40-00:50:00", 1000000L, 3000000L);
+                assertBucket(buckets.get(1), 1L, "00:50:00-01:06:40", 3000000L, 4000000L);
+            }
+        );
         // using different format should work when to/from is compatible with
         // format in aggregation
-        searchResponse = prepareSearch(indexName).setSize(0)
-            .addAggregation(
-                dateRange("date_range").field("date").addRange("00.16.40", "00.50.00").addRange("00.50.00", "01.06.40").format("HH.mm.ss")
-            )
-            .get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
-        assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L);
-        assertBucket(buckets.get(1), 1L, "00.50.00-01.06.40", 3000000L, 4000000L);
-
+        assertNoFailuresAndResponse(
+            prepareSearch(indexName).setSize(0)
+                .addAggregation(
+                    dateRange("date_range").field("date")
+                        .addRange("00.16.40", "00.50.00")
+                        .addRange("00.50.00", "01.06.40")
+                        .format("HH.mm.ss")
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                List<Range.Bucket> buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2);
+                assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L);
+                assertBucket(buckets.get(1), 1L, "00.50.00-01.06.40", 3000000L, 4000000L);
+            }
+        );
         // providing numeric input with format should work, but bucket keys are
         // different now
-        searchResponse = prepareSearch(indexName).setSize(0)
-            .addAggregation(
-                dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis")
-            )
-            .get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
-        assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L);
-        assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L);
-
+        assertNoFailuresAndResponse(
+            prepareSearch(indexName).setSize(0)
+                .addAggregation(
+                    dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis")
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                List<Range.Bucket> buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2);
+                assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L);
+                assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L);
+            }
+        );
         // providing numeric input without format should throw an exception
         ElasticsearchException e = expectThrows(
             ElasticsearchException.class,
@@ -753,62 +778,76 @@ public void testRangeWithFormatNumericValue() throws Exception {
         assertAcked(prepareCreate(indexName).setMapping("date", "type=date,format=epoch_second"));
         indexRandom(
             true,
-            client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", 1002).endObject()),
-            client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", 2000).endObject()),
-            client().prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", 3008).endObject())
+            prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", 1002).endObject()),
+            prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", 2000).endObject()),
+            prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", 3008).endObject())
         );
 
         // using no format should work when to/from is compatible with format in
         // mapping
-        SearchResponse searchResponse = prepareSearch(indexName).setSize(0)
-            .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000))
-            .get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        List<Range.Bucket> buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
-        assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
-        assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);
-
+        assertNoFailuresAndResponse(
+            prepareSearch(indexName).setSize(0)
+                .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                List<Range.Bucket> buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2);
+                assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
+                assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);
+            }
+        );
         // using no format should also work when and to/from are string values
-        searchResponse = prepareSearch(indexName).setSize(0)
-            .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000"))
-            .get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
-        assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
-        assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);
-
+        assertNoFailuresAndResponse(
+            prepareSearch(indexName).setSize(0)
+                .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                List<Range.Bucket> buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2);
+                assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
+                assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);
+            }
+        );
        // also e-notation should work, fractional parts should be truncated
-        searchResponse = prepareSearch(indexName).setSize(0)
-            .addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3))
-            .get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
-        assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
-        assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);
-
+        assertNoFailuresAndResponse(
+            prepareSearch(indexName).setSize(0)
+                .addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                List<Range.Bucket> buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2);
+                assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L);
+                assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L);
+            }
+        );
         // using different format should work when to/from is compatible with
         // format in aggregation
-        searchResponse = prepareSearch(indexName).setSize(0)
-            .addAggregation(
-                dateRange("date_range").field("date").addRange("00.16.40", "00.50.00").addRange("00.50.00", "01.06.40").format("HH.mm.ss")
-            )
-            .get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
-        assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L);
-        assertBucket(buckets.get(1), 1L, "00.50.00-01.06.40", 3000000L, 4000000L);
-
+        assertNoFailuresAndResponse(
+            prepareSearch(indexName).setSize(0)
+                .addAggregation(
+                    dateRange("date_range").field("date")
+                        .addRange("00.16.40", "00.50.00")
+                        .addRange("00.50.00", "01.06.40")
+                        .format("HH.mm.ss")
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                List<Range.Bucket> buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2);
+                assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L);
+                assertBucket(buckets.get(1), 1L, "00.50.00-01.06.40", 3000000L, 4000000L);
+            }
+        );
         // providing different numeric input with format should work, but bucket
         // keys are different now
-        searchResponse = prepareSearch(indexName).setSize(0)
-            .addAggregation(
-                dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis")
-            )
-            .get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
-        assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L);
-        assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L);
+        assertNoFailuresAndResponse(
+            prepareSearch(indexName).setSize(0)
+                .addAggregation(
+                    dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis")
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                List<Range.Bucket> buckets = checkBuckets(response.getAggregations().get("date_range"), "date_range", 2);
+                assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L);
+                assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L);
+            }
+        );
     }
 
     private static List<Range.Bucket> checkBuckets(Range dateRange, String expectedAggName, long expectedBucketsSize) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
index 612b4bf006aa2..3a313cec29402 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.aggregations.bucket;
 
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.index.query.TermQueryBuilder;
 import org.elasticsearch.search.aggregations.BucketOrder;
@@ -29,6 +28,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -77,12 +77,10 @@ public void setupSuiteScopeCluster() throws Exception {
 
         for (int i = 0; i < data.length; i++) {
             String[] parts = data[i].split(",");
-            client().prepareIndex("test")
-                .setId("" + i)
+            prepareIndex("test").setId("" + i)
                 .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
                 .get();
-            client().prepareIndex("idx_unmapped_author")
-                .setId("" + i)
+            prepareIndex("idx_unmapped_author").setId("" + i)
                 .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
                 .get();
         }
@@ -93,31 +91,32 @@ public void testIssue10719() throws Exception {
         // Tests that we can refer to nested elements under a sample in a path
         // statement
         boolean asc = randomBoolean();
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .addAggregation(
-                terms("genres").field("genre")
-                    .order(BucketOrder.aggregation("sample>max_price.value", asc))
-                    .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price")))
-            )
-            .get();
-        assertNoFailures(response);
-        Terms genres = response.getAggregations().get("genres");
-        Collection<? extends Terms.Bucket> genreBuckets = genres.getBuckets();
-        // For this test to be useful we need >1 genre bucket to compare
-        assertThat(genreBuckets.size(), greaterThan(1));
-        double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE;
-        for (Terms.Bucket genreBucket : genres.getBuckets()) {
-            Sampler sample = genreBucket.getAggregations().get("sample");
-            Max maxPriceInGenre = sample.getAggregations().get("max_price");
-            double price = maxPriceInGenre.value();
-            if (asc) {
-                assertThat(price, greaterThanOrEqualTo(lastMaxPrice));
-            } else {
-                assertThat(price, lessThanOrEqualTo(lastMaxPrice));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .addAggregation(
+                    terms("genres").field("genre")
+                        .order(BucketOrder.aggregation("sample>max_price.value", asc))
+                        .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price")))
+                ),
+            response -> {
+                Terms genres = response.getAggregations().get("genres");
+                Collection<? extends Terms.Bucket> genreBuckets = genres.getBuckets();
+                // For this test to be useful we need >1 genre bucket to compare
+                assertThat(genreBuckets.size(), greaterThan(1));
+                double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE;
+                for (Terms.Bucket genreBucket : genres.getBuckets()) {
+                    Sampler sample = genreBucket.getAggregations().get("sample");
+                    Max maxPriceInGenre = sample.getAggregations().get("max_price");
+                    double price = maxPriceInGenre.value();
+                    if (asc) {
+                        assertThat(price, greaterThanOrEqualTo(lastMaxPrice));
+                    } else {
+                        assertThat(price, lessThanOrEqualTo(lastMaxPrice));
+                    }
+                    lastMaxPrice = price;
+                }
             }
-            lastMaxPrice = price;
-        }
-
+        );
     }
 
     public void testSimpleDiversity() throws Exception {
@@ -125,20 +124,22 @@ public void testSimpleDiversity() throws Exception {
         DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100);
         sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        Terms authors = sample.getAggregations().get("authors");
-        List<? extends Terms.Bucket> testBuckets = authors.getBuckets();
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                Terms authors = sample.getAggregations().get("authors");
+                List<? extends Terms.Bucket> testBuckets = authors.getBuckets();
 
-        for (Terms.Bucket testBucket : testBuckets) {
-            assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
-        }
+                for (Terms.Bucket testBucket : testBuckets) {
+                    assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+                }
+            }
+        );
     }
 
     public void testNestedDiversity() throws Exception {
@@ -151,19 +152,22 @@ public void testNestedDiversity() throws Exception {
         sampleAgg.subAggregation(terms("authors").field("author"));
         rootTerms.subAggregation(sampleAgg);
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootTerms).get();
-        assertNoFailures(response);
-        Terms genres = response.getAggregations().get("genres");
-        List<? extends Terms.Bucket> genreBuckets = genres.getBuckets();
-        for (Terms.Bucket genreBucket : genreBuckets) {
-            Sampler sample = genreBucket.getAggregations().get("sample");
-            Terms authors = sample.getAggregations().get("authors");
-            List<? extends Terms.Bucket> testBuckets = authors.getBuckets();
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootTerms),
+            response -> {
+                Terms genres = response.getAggregations().get("genres");
+                List<? extends Terms.Bucket> genreBuckets = genres.getBuckets();
+                for (Terms.Bucket genreBucket : genreBuckets) {
+                    Sampler sample = genreBucket.getAggregations().get("sample");
+                    Terms authors = sample.getAggregations().get("authors");
+                    List<? extends Terms.Bucket> testBuckets = authors.getBuckets();
 
-            for (Terms.Bucket testBucket : testBuckets) {
-                assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+                    for (Terms.Bucket testBucket : testBuckets) {
+                        assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testNestedSamples() throws Exception {
@@ -180,22 +184,25 @@
         sampleAgg.subAggregation(terms("genres").field("genre"));
         rootSample.subAggregation(sampleAgg);
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootSample).get();
-        assertNoFailures(response);
-        Sampler genreSample = response.getAggregations().get("genreSample");
-        Sampler sample = genreSample.getAggregations().get("sample");
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootSample),
+            response -> {
+                Sampler genreSample = response.getAggregations().get("genreSample");
+                Sampler sample = genreSample.getAggregations().get("sample");
 
-        Terms genres = sample.getAggregations().get("genres");
-        List<? extends Terms.Bucket> testBuckets = genres.getBuckets();
-        for (Terms.Bucket testBucket : testBuckets) {
-            assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_GENRE));
-        }
+                Terms genres = sample.getAggregations().get("genres");
+                List<? extends Terms.Bucket> testBuckets = genres.getBuckets();
+                for (Terms.Bucket testBucket : testBuckets) {
+                    assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_GENRE));
+                }
 
-        Terms authors = sample.getAggregations().get("authors");
-        testBuckets = authors.getBuckets();
-        for (Terms.Bucket testBucket : testBuckets) {
-            assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
-        }
+                Terms authors = sample.getAggregations().get("authors");
+                testBuckets = authors.getBuckets();
+                for (Terms.Bucket testBucket : testBuckets) {
+                    assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+                }
+            }
+        );
     }
 
     public void testPartiallyUnmappedDiversifyField() throws Exception {
@@ -205,17 +212,19 @@ public void testPartiallyUnmappedDiversifyField() throws Exception {
             .field("author")
             .maxDocsPerValue(1);
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("idx_unmapped_author", "test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        assertThat(sample.getDocCount(), greaterThan(0L));
-        Terms authors = sample.getAggregations().get("authors");
-        assertThat(authors.getBuckets().size(), greaterThan(0));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped_author", "test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                assertThat(sample.getDocCount(), greaterThan(0L));
+                Terms authors = sample.getAggregations().get("authors");
+                assertThat(authors.getBuckets().size(), greaterThan(0));
+            }
+        );
     }
 
     public void testWhollyUnmappedDiversifyField() throws Exception {
@@ -224,17 +233,19 @@ public void testWhollyUnmappedDiversifyField() throws Exception {
         DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100);
         sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("idx_unmapped", "idx_unmapped_author").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        assertThat(sample.getDocCount(), equalTo(0L));
-        Terms authors = sample.getAggregations().get("authors");
-        assertNull(authors);
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped", "idx_unmapped_author").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                assertThat(sample.getDocCount(), equalTo(0L));
+                Terms authors = sample.getAggregations().get("authors");
+                assertNull(authors);
+            }
+        );
     }
 
     public void testRidiculousSizeDiversity() throws Exception {
@@ -242,24 +253,23 @@ public void testRidiculousSizeDiversity() throws Exception {
         DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(Integer.MAX_VALUE);
         sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
+        assertNoFailures(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg)
+        );
 
         sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100);
         sampleAgg.field("author").maxDocsPerValue(Integer.MAX_VALUE).executionHint(randomExecutionHint());
sampleAgg.subAggregation(terms("authors").field("author")); - response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0) - .setSize(60) - .addAggregation(sampleAgg) - .get(); - assertNoFailures(response); + assertNoFailures( + prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 0381a5521dea0..1500c203ea4db 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -55,6 +54,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -115,33 +115,31 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < NUM_DOCS; i++) { builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, (double) i) - .field("num_tag", i < NUM_DOCS / 2 + 1 ? 1.0 : 0.0) // used to test order by single-bucket sub agg - .field("constant", 1) - .startArray(MULTI_VALUED_FIELD_NAME) - .value((double) i) - .value(i + 1d) - .endArray() - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, (double) i) + .field("num_tag", i < NUM_DOCS / 2 + 1 ? 
+                        .field("num_tag", i < NUM_DOCS / 2 + 1 ? 1.0 : 0.0) // used to test order by single-bucket sub agg
+                        .field("constant", 1)
+                        .startArray(MULTI_VALUED_FIELD_NAME)
+                        .value((double) i)
+                        .value(i + 1d)
+                        .endArray()
+                        .endObject()
+                )
             );
         }
 
         for (int i = 0; i < 100; i++) {
             builders.add(
-                client().prepareIndex("high_card_idx")
-                    .setSource(
-                        jsonBuilder().startObject()
-                            .field(SINGLE_VALUED_FIELD_NAME, (double) i)
-                            .startArray(MULTI_VALUED_FIELD_NAME)
-                            .value((double) i)
-                            .value(i + 1d)
-                            .endArray()
-                            .endObject()
-                    )
+                prepareIndex("high_card_idx").setSource(
+                    jsonBuilder().startObject()
+                        .field(SINGLE_VALUED_FIELD_NAME, (double) i)
+                        .startArray(MULTI_VALUED_FIELD_NAME)
+                        .value((double) i)
+                        .value(i + 1d)
+                        .endArray()
+                        .endObject()
+                )
             );
         }
 
@@ -149,8 +147,7 @@ public void setupSuiteScopeCluster() throws Exception {
         assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer"));
         for (int i = 0; i < 2; i++) {
             builders.add(
-                client().prepareIndex("empty_bucket_idx")
-                    .setId("" + i)
+                prepareIndex("empty_bucket_idx").setId("" + i)
                     .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())
             );
         }
@@ -209,45 +206,55 @@ private void getMultiSortDocs(List<IndexRequestBuilder> builders) throws IOExcep
         assertAcked(prepareCreate("sort_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=double"));
         for (int i = 1; i <= 3; i++) {
             builders.add(
-                client().prepareIndex("sort_idx")
-                    .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject())
+                prepareIndex("sort_idx").setSource(
+                    jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()
+                )
             );
             builders.add(
-                client().prepareIndex("sort_idx")
-                    .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject())
+                prepareIndex("sort_idx").setSource(
+                    jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()
+                )
             );
         }
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject()
+            )
        );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()
+            )
        );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject()
+            )
        );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject()
+            )
         );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()
+            )
        );
         builders.add(
-            client().prepareIndex("sort_idx")
-                .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject())
+            prepareIndex("sort_idx").setSource(
+                jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()
+            )
         );
     }
 
@@ -275,105 +282,116 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception {
 
     private void runTestFieldWithPartitionedFiltering(String field) throws Exception {
         // Find total number of unique terms
-        SearchResponse allResponse = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-        assertNoFailures(allResponse);
-        DoubleTerms terms = allResponse.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        int expectedCardinality = terms.getBuckets().size();
+        int[] expectedCardinality = new int[1];
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                assertNoFailures(response);
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                expectedCardinality[0] = terms.getBuckets().size();
+            }
        );
 
         // Gather terms using partitioned aggregations
         final int numPartitions = randomIntBetween(2, 4);
         Set<Number> foundTerms = new HashSet<>();
         for (int partition = 0; partition < numPartitions; partition++) {
-            SearchResponse response = prepareSearch("idx").addAggregation(
-                new TermsAggregationBuilder("terms").field(field)
-                    .includeExclude(new IncludeExclude(partition, numPartitions))
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-            ).get();
-            assertNoFailures(response);
-            terms = response.getAggregations().get("terms");
-            assertThat(terms, notNullValue());
-            assertThat(terms.getName(), equalTo("terms"));
-            for (DoubleTerms.Bucket bucket : terms.getBuckets()) {
-                assertTrue(foundTerms.add(bucket.getKeyAsNumber()));
-                assertThat(bucket.getKeyAsNumber(), instanceOf(Double.class));
-            }
+            assertNoFailuresAndResponse(
+                prepareSearch("idx").addAggregation(
+                    new TermsAggregationBuilder("terms").field(field)
+                        .includeExclude(new IncludeExclude(partition, numPartitions))
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                ),
+                response -> {
+                    DoubleTerms terms = response.getAggregations().get("terms");
+                    assertThat(terms, notNullValue());
+                    assertThat(terms.getName(), equalTo("terms"));
+                    for (DoubleTerms.Bucket bucket : terms.getBuckets()) {
+                        assertTrue(foundTerms.add(bucket.getKeyAsNumber()));
+                        assertThat(bucket.getKeyAsNumber(), instanceOf(Double.class));
+                    }
+                }
+            );
         }
 
-        assertEquals(expectedCardinality, foundTerms.size());
+        assertEquals(expectedCardinality[0], foundTerms.size());
     }
 
     public void testSingleValuedFieldWithValueScript() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d)));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d)));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }
 
     public void testMultiValuedFieldWithValueScript() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(6));
-
-        for (int i = 0; i < 6; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d)));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
-            if (i == 0 || i == 5) {
-                assertThat(bucket.getDocCount(), equalTo(1L));
-            } else {
-                assertThat(bucket.getDocCount(), equalTo(2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(6));
+
+                for (int i = 0; i < 6; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d)));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+                    if (i == 0 || i == 5) {
+                        assertThat(bucket.getDocCount(), equalTo(1L));
+                    } else {
+                        assertThat(bucket.getDocCount(), equalTo(2L));
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(long) (_value / 1000 + 1)", Collections.emptyMap()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(1));
-
-        DoubleTerms.Bucket bucket = terms.getBucketByKey("1.0");
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKeyAsString(), equalTo("1.0"));
-        assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
-        assertThat(bucket.getDocCount(), equalTo(5L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(long) (_value / 1000 + 1)", Collections.emptyMap()))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(1));
+
+                DoubleTerms.Bucket bucket = terms.getBucketByKey("1.0");
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKeyAsString(), equalTo("1.0"));
+                assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+                assertThat(bucket.getDocCount(), equalTo(5L));
+            }
+        );
     }
 
     /*
@@ -394,239 +412,251 @@ public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception {
     */
 
     public void testScriptSingleValue() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
-                .userValueTypeHint(ValueType.DOUBLE)
-                .script(
-                    new Script(
-                        ScriptType.INLINE,
-                        CustomScriptPlugin.NAME,
-                        "doc['" + MULTI_VALUED_FIELD_NAME + "'].value",
-                        Collections.emptyMap()
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .userValueTypeHint(ValueType.DOUBLE)
+                    .script(
+                        new Script(
+                            ScriptType.INLINE,
+                            CustomScriptPlugin.NAME,
+                            "doc['" + MULTI_VALUED_FIELD_NAME + "'].value",
+                            Collections.emptyMap()
+                        )
                     )
-                )
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }
 
     public void testScriptMultiValued() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
-                .userValueTypeHint(ValueType.DOUBLE)
-                .script(
-                    new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap())
-                )
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(6));
-
-        for (int i = 0; i < 6; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            if (i == 0 || i == 5) {
-                assertThat(bucket.getDocCount(), equalTo(1L));
-            } else {
-                assertThat(bucket.getDocCount(), equalTo(2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .userValueTypeHint(ValueType.DOUBLE)
+                    .script(
+                        new Script(
+                            ScriptType.INLINE,
+                            CustomScriptPlugin.NAME,
+                            "doc['" + MULTI_VALUED_FIELD_NAME + "']",
+                            Collections.emptyMap()
+                        )
+                    )
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(6));
+
+                for (int i = 0; i < 6; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    if (i == 0 || i == 5) {
+                        assertThat(bucket.getDocCount(), equalTo(1L));
+                    } else {
+                        assertThat(bucket.getDocCount(), equalTo(2L));
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testPartiallyUnmapped() throws Exception {
-        SearchResponse response = prepareSearch("idx_unmapped", "idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped", "idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }
 
     public void testPartiallyUnmappedWithFormat() throws Exception {
-        SearchResponse response = prepareSearch("idx_unmapped", "idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .format("0000.00")
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            String key = Strings.format("%07.2f", (double) i);
-            DoubleTerms.Bucket bucket = terms.getBucketByKey(key);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo(key));
-            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped", "idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .format("0000.00")
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    String key = Strings.format("%07.2f", (double) i);
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey(key);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo(key));
+                    assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                }
+            }
+        );
     }
 
     public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithSubTermsAgg() throws Exception {
         boolean asc = true;
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("avg_i", asc))
-                .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
-                .subAggregation(
-                    new TermsAggregationBuilder("subTerms").field(MULTI_VALUED_FIELD_NAME)
-                        .collectMode(randomFrom(SubAggCollectionMode.values()))
-                )
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getName(), equalTo("terms"));
-        assertThat(terms.getBuckets().size(), equalTo(5));
-
-        for (int i = 0; i < 5; i++) {
-            DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
-            assertThat(bucket, notNullValue());
-            assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
-            assertThat(bucket.getDocCount(), equalTo(1L));
-
-            Avg avg = bucket.getAggregations().get("avg_i");
-            assertThat(avg, notNullValue());
-            assertThat(avg.getValue(), equalTo((double) i));
-
-            DoubleTerms subTermsAgg = bucket.getAggregations().get("subTerms");
-            assertThat(subTermsAgg, notNullValue());
-            assertThat(subTermsAgg.getBuckets().size(), equalTo(2));
-            double j = i;
-            for (DoubleTerms.Bucket subBucket : subTermsAgg.getBuckets()) {
-                assertThat(subBucket, notNullValue());
-                assertThat(subBucket.getKeyAsString(), equalTo(String.valueOf(j)));
-                assertThat(subBucket.getDocCount(), equalTo(1L));
-                j++;
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("avg_i", asc))
+                    .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+                    .subAggregation(
+                        new TermsAggregationBuilder("subTerms").field(MULTI_VALUED_FIELD_NAME)
+                            .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    )
+            ),
+            response -> {
+                DoubleTerms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getName(), equalTo("terms"));
+                assertThat(terms.getBuckets().size(), equalTo(5));
+
+                for (int i = 0; i < 5; i++) {
+                    DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+                    assertThat(bucket, notNullValue());
+                    assertThat(bucket.getKeyAsString(), equalTo("" + (double) i));
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+
+                    Avg avg = bucket.getAggregations().get("avg_i");
+                    assertThat(avg, notNullValue());
+                    assertThat(avg.getValue(), equalTo((double) i));
+
+                    DoubleTerms subTermsAgg = bucket.getAggregations().get("subTerms");
+                    assertThat(subTermsAgg, notNullValue());
+                    assertThat(subTermsAgg.getBuckets().size(), equalTo(2));
+                    double j = i;
+                    for (DoubleTerms.Bucket subBucket : subTermsAgg.getBuckets()) {
+                        assertThat(subBucket, notNullValue());
+                        assertThat(subBucket.getKeyAsString(), equalTo(String.valueOf(j)));
+                        assertThat(subBucket.getDocCount(), equalTo(1L));
+                        j++;
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception {
         boolean asc = randomBoolean();
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("num_tags").field("num_tag")
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("filter", asc))
-                .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms tags = response.getAggregations().get("num_tags");
-        assertThat(tags, notNullValue());
-        assertThat(tags.getName(), equalTo("num_tags"));
-        assertThat(tags.getBuckets().size(), equalTo(2));
-
-        Iterator<DoubleTerms.Bucket> iters = tags.getBuckets().iterator();
-
-        DoubleTerms.Bucket tag = iters.next();
-        assertThat(tag, notNullValue());
-        assertThat(tag.getKeyAsString(), equalTo(asc ? "0.0" : "1.0"));
-        assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L));
-        Filter filter = tag.getAggregations().get("filter");
-        assertThat(filter, notNullValue());
-        assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L));
-
-        tag = iters.next();
-        assertThat(tag, notNullValue());
-        assertThat(tag.getKeyAsString(), equalTo(asc ? "1.0" : "0.0"));
-        assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L));
-        filter = tag.getAggregations().get("filter");
-        assertThat(filter, notNullValue());
-        assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("num_tags").field("num_tag")
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("filter", asc))
+                    .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))
+            ),
+            response -> {
+                DoubleTerms tags = response.getAggregations().get("num_tags");
+                assertThat(tags, notNullValue());
+                assertThat(tags.getName(), equalTo("num_tags"));
+                assertThat(tags.getBuckets().size(), equalTo(2));
+
+                Iterator<DoubleTerms.Bucket> iters = tags.getBuckets().iterator();
+
+                DoubleTerms.Bucket tag = iters.next();
+                assertThat(tag, notNullValue());
+                assertThat(tag.getKeyAsString(), equalTo(asc ? "0.0" : "1.0"));
+                assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L));
+                Filter filter = tag.getAggregations().get("filter");
+                assertThat(filter, notNullValue());
+                assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L));
+
+                tag = iters.next();
+                assertThat(tag, notNullValue());
+                assertThat(tag.getKeyAsString(), equalTo(asc ? "1.0" : "0.0"));
+                assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L));
+                filter = tag.getAggregations().get("filter");
+                assertThat(filter, notNullValue());
+                assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L));
+            }
+        );
     }
 
     public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception {
         boolean asc = randomBoolean();
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            new TermsAggregationBuilder("tags").field("num_tag")
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .order(BucketOrder.aggregation("filter1>filter2>max", asc))
-                .subAggregation(
-                    filter("filter1", QueryBuilders.matchAllQuery()).subAggregation(
-                        filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                new TermsAggregationBuilder("tags").field("num_tag")
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .order(BucketOrder.aggregation("filter1>filter2>max", asc))
+                    .subAggregation(
+                        filter("filter1", QueryBuilders.matchAllQuery()).subAggregation(
+                            filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))
+                        )
                     )
-                )
-        ).get();
-
-        assertNoFailures(response);
-
-        DoubleTerms tags = response.getAggregations().get("tags");
-        assertThat(tags, notNullValue());
-        assertThat(tags.getName(), equalTo("tags"));
-        assertThat(tags.getBuckets().size(), equalTo(2));
-
-        Iterator<DoubleTerms.Bucket> iters = tags.getBuckets().iterator();
-
-        // the max for "1" is 2
-        // the max for "0" is 4
-
-        DoubleTerms.Bucket tag = iters.next();
-        assertThat(tag, notNullValue());
-        assertThat(tag.getKeyAsString(), equalTo(asc ? "1.0" : "0.0"));
-        assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L));
-        Filter filter1 = tag.getAggregations().get("filter1");
-        assertThat(filter1, notNullValue());
-        assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L));
-        Filter filter2 = filter1.getAggregations().get("filter2");
-        assertThat(filter2, notNullValue());
-        assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L));
-        Max max = filter2.getAggregations().get("max");
-        assertThat(max, notNullValue());
-        assertThat(max.value(), equalTo(asc ? 2.0 : 4.0));
-
-        tag = iters.next();
-        assertThat(tag, notNullValue());
-        assertThat(tag.getKeyAsString(), equalTo(asc ? "0.0" : "1.0"));
-        assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L));
-        filter1 = tag.getAggregations().get("filter1");
-        assertThat(filter1, notNullValue());
-        assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L));
-        filter2 = filter1.getAggregations().get("filter2");
-        assertThat(filter2, notNullValue());
-        assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L));
-        max = filter2.getAggregations().get("max");
-        assertThat(max, notNullValue());
-        assertThat(max.value(), equalTo(asc ? 4.0 : 2.0));
+            ),
+            response -> {
+                DoubleTerms tags = response.getAggregations().get("tags");
+                assertThat(tags, notNullValue());
+                assertThat(tags.getName(), equalTo("tags"));
+                assertThat(tags.getBuckets().size(), equalTo(2));
+
+                Iterator<DoubleTerms.Bucket> iters = tags.getBuckets().iterator();
+
+                // the max for "1" is 2
+                // the max for "0" is 4
+
+                DoubleTerms.Bucket tag = iters.next();
+                assertThat(tag, notNullValue());
+                assertThat(tag.getKeyAsString(), equalTo(asc ? "1.0" : "0.0"));
+                assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L));
+                Filter filter1 = tag.getAggregations().get("filter1");
+                assertThat(filter1, notNullValue());
+                assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L));
+                Filter filter2 = filter1.getAggregations().get("filter2");
+                assertThat(filter2, notNullValue());
+                assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L));
+                Max max = filter2.getAggregations().get("max");
+                assertThat(max, notNullValue());
+                assertThat(max.value(), equalTo(asc ? 2.0 : 4.0));
+
+                tag = iters.next();
+                assertThat(tag, notNullValue());
+                assertThat(tag.getKeyAsString(), equalTo(asc ? "0.0" : "1.0"));
+                assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L));
+                filter1 = tag.getAggregations().get("filter1");
+                assertThat(filter1, notNullValue());
+                assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L));
+                filter2 = filter1.getAggregations().get("filter2");
+                assertThat(filter2, notNullValue());
+                assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L));
+                max = filter2.getAggregations().get("max");
+                assertThat(max, notNullValue());
4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Exception { @@ -710,87 +740,89 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + (double) i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + (double) i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 4; i >= 0; i--) { - DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + (double) i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + 
.order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 4; i >= 0; i--) { + DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + (double) i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.variance", asc)) - .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + (double) i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.variance", asc)) + .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + (double) i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + } + } + ); } public void testScriptScore() { @@ -808,28 +840,28 @@ public void testScriptScore() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(functionScoreQuery(scriptFunction(scoringScript))) - .addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .userValueTypeHint(ValueType.DOUBLE) - .script(aggregationScript) - ) - .get(); - - assertNoFailures(response); - - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); - - for (int i = 0; i < 3; i++) { - DoubleTerms.Bucket bucket = 
terms.getBucketByKey("" + (double) i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + (double) i)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - assertThat(bucket.getDocCount(), equalTo(i == 1 ? 3L : 1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(functionScoreQuery(scriptFunction(scoringScript))) + .addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .userValueTypeHint(ValueType.DOUBLE) + .script(aggregationScript) + ), + response -> { + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + for (int i = 0; i < 3; i++) { + DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (double) i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + (double) i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(i == 1 ? 3L : 1L)); + } + } + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { @@ -873,34 +905,35 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(double[] expectedKeys, BucketOrder... order) { - SearchResponse response = prepareSearch("sort_idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d")) - ).get(); - - assertNoFailures(response); - - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); - - int i = 0; - for (DoubleTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(expectedKeys[i]))); - assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); - Avg avg = bucket.getAggregations().get("avg_l"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); - Sum sum = bucket.getAggregations().get("sum_d"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("sort_idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ), + response -> { + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (DoubleTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(expectedKeys[i]))); + assertThat(bucket.getDocCount(), 
equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + ); } public void testOtherDocCount() { @@ -918,8 +951,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1.5), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2.5) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1.5), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2.5) ); // Make sure we are starting with a clear cache @@ -933,13 +966,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -951,13 +984,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -969,8 +1002,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java index 5971e287882f2..b5dea9cbbba49 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import 
org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -28,7 +27,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -49,8 +48,7 @@ public void setupSuiteScopeCluster() throws Exception { List<IndexRequestBuilder> builders = new ArrayList<>(); for (int i = 0; i < numTag1Docs; i++) { builders.add( - client().prepareIndex("idx") - .setId("" + i) + prepareIndex("idx").setId("" + i) .setSource(jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject()) ); } @@ -60,18 +58,16 @@ public void setupSuiteScopeCluster() throws Exception { .field("tag", "tag2") .field("name", "name" + i) .endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { // randomly index the document twice so that we have deleted docs that match the filter - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } indexRandom(true, builders); @@ -79,70 +75,66 @@ } public void testSimple() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1"))).get(); - - assertNoFailures(response); - - Filter filter = response.getAggregations().get("tag1"); - assertThat(filter, notNullValue()); - assertThat(filter.getName(), equalTo("tag1")); - assertThat(filter.getDocCount(), equalTo((long) numTag1Docs)); + assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1"))), response -> { + Filter filter = response.getAggregations().get("tag1"); + assertThat(filter, notNullValue()); + assertThat(filter.getName(), equalTo("tag1")); + assertThat(filter.getDocCount(), equalTo((long) numTag1Docs)); + }); } // See NullPointer issue when filters are empty: // https://github.com/elastic/elasticsearch/issues/8438 public void testEmptyFilterDeclarations() throws Exception { QueryBuilder emptyFilter = new BoolQueryBuilder(); - SearchResponse response = prepareSearch("idx").addAggregation(filter("tag1", emptyFilter)).get(); - - assertNoFailures(response); - - Filter filter = response.getAggregations().get("tag1"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo((long) numDocs)); + 
assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(filter("tag1", emptyFilter)), response -> { + Filter filter = response.getAggregations().get("tag1"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo((long) numDocs)); + }); } public void testWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filter("tag1", termQuery("tag", "tag1")).subAggregation(avg("avg_value").field("value")) - ).get(); - - assertNoFailures(response); - - Filter filter = response.getAggregations().get("tag1"); - assertThat(filter, notNullValue()); - assertThat(filter.getName(), equalTo("tag1")); - assertThat(filter.getDocCount(), equalTo((long) numTag1Docs)); - assertThat((long) ((InternalAggregation) filter).getProperty("_count"), equalTo((long) numTag1Docs)); - - long sum = 0; - for (int i = 0; i < numTag1Docs; ++i) { - sum += i + 1; - } - assertThat(filter.getAggregations().asList().isEmpty(), is(false)); - Avg avgValue = filter.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); - assertThat((double) ((InternalAggregation) filter).getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1")).subAggregation(avg("avg_value").field("value"))), + response -> { + Filter filter = response.getAggregations().get("tag1"); + assertThat(filter, notNullValue()); + assertThat(filter.getName(), equalTo("tag1")); + assertThat(filter.getDocCount(), equalTo((long) numTag1Docs)); + assertThat((long) ((InternalAggregation) filter).getProperty("_count"), equalTo((long) numTag1Docs)); + + long sum = 0; + for (int i = 0; i < numTag1Docs; ++i) { + sum += i + 1; + } + assertThat(filter.getAggregations().asList().isEmpty(), is(false)); + Avg avgValue = filter.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); + assertThat((double) ((InternalAggregation) filter).getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs)); + } + ); } public void testAsSubAggregation() { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field("value").interval(2L).subAggregation(filter("filter", matchAllQuery())) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1)); - - for (Histogram.Bucket bucket : histo.getBuckets()) { - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertEquals(bucket.getDocCount(), filter.getDocCount()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field("value").interval(2L).subAggregation(filter("filter", matchAllQuery())) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1)); + + for (Histogram.Bucket bucket : histo.getBuckets()) { + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertEquals(bucket.getDocCount(), filter.getDocCount()); + } + } + ); 
} public void testWithContextBasedSubAggregation() throws Exception { @@ -160,19 +152,23 @@ public void testWithContextBasedSubAggregation() throws Exception { } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(filter("filter", matchAllQuery()))) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, Matchers.notNullValue()); - assertThat(filter.getName(), equalTo("filter")); - assertThat(filter.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(filter("filter", matchAllQuery())) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, Matchers.notNullValue()); + assertThat(filter.getName(), equalTo("filter")); + assertThat(filter.getDocCount(), is(0L)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java index fa8974371a935..b04cb5325a82d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -34,7 +33,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.filters; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -56,10 +55,10 @@ public void setupSuiteScopeCluster() throws Exception { List<IndexRequestBuilder> builders = new ArrayList<>(); for (int i = 0; i < numTag1Docs; i++) { XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { // randomly 
index the document twice so that we have deleted docs that match the filter - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); i++) { @@ -68,9 +67,9 @@ public void setupSuiteScopeCluster() throws Exception { .field("tag", "tag2") .field("name", "name" + i) .endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs + numTag2Docs; i < numDocs; i++) { @@ -80,17 +79,15 @@ public void setupSuiteScopeCluster() throws Exception { .field("tag", "tag3") .field("name", "name" + i) .endObject(); - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i).setSource(source)); } } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } indexRandom(true, builders); @@ -98,121 +95,125 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSimple() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(2)); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(2)); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + } + ); } // See NullPointer issue when filters are empty: // https://github.com/elastic/elasticsearch/issues/8438 public void testEmptyFilterDeclarations() throws Exception { QueryBuilder 
emptyFilter = new BoolQueryBuilder(); - SearchResponse response = prepareSearch("idx").addAggregation( - filters("tags", randomOrder(new KeyedFilter("all", emptyFilter), new KeyedFilter("tag1", termQuery("tag", "tag1")))) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - Filters.Bucket allBucket = filters.getBucketByKey("all"); - assertThat(allBucket.getDocCount(), equalTo((long) numDocs)); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters("tags", randomOrder(new KeyedFilter("all", emptyFilter), new KeyedFilter("tag1", termQuery("tag", "tag1")))) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + Filters.Bucket allBucket = filters.getBucketByKey("all"); + assertThat(allBucket.getDocCount(), equalTo((long) numDocs)); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + } + ); } public void testWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ).subAggregation(avg("avg_value").field("value")) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(2)); - assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(2)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - long sum = 0; - for (int i = 0; i < numTag1Docs; ++i) { - sum += i + 1; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Avg avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); - assertThat((String) propertiesKeys[0], equalTo("tag1")); - assertThat((long) propertiesDocCounts[0], equalTo((long) numTag1Docs)); - assertThat((double) propertiesCounts[0], equalTo((double) sum / numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - sum = 0; - for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); ++i) { - sum += i; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs)); - 
assertThat(propertiesKeys[1], equalTo("tag2")); - assertThat(propertiesDocCounts[1], equalTo((long) numTag2Docs)); - assertThat(propertiesCounts[1], equalTo((double) sum / numTag2Docs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).subAggregation(avg("avg_value").field("value")) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(2)); + assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(2)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + long sum = 0; + for (int i = 0; i < numTag1Docs; ++i) { + sum += i + 1; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Avg avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); + assertThat((String) propertiesKeys[0], equalTo("tag1")); + assertThat((long) propertiesDocCounts[0], equalTo((long) numTag1Docs)); + assertThat((double) propertiesCounts[0], equalTo((double) sum / numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + sum = 0; + for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); ++i) { + sum += i; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs)); + assertThat(propertiesKeys[1], equalTo("tag2")); + assertThat(propertiesDocCounts[1], equalTo((long) numTag2Docs)); + assertThat(propertiesCounts[1], equalTo((double) sum / numTag2Docs)); + } + ); } public void testAsSubAggregation() { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field("value").interval(2L).subAggregation(filters("filters", matchAllQuery())) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1)); - - for (Histogram.Bucket bucket : histo.getBuckets()) { - Filters filters = bucket.getAggregations().get("filters"); - assertThat(filters, notNullValue()); - assertThat(filters.getBuckets().size(), equalTo(1)); - Filters.Bucket filterBucket = filters.getBuckets().get(0); - assertEquals(bucket.getDocCount(), filterBucket.getDocCount()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field("value").interval(2L).subAggregation(filters("filters", matchAllQuery())) + ), + response -> { + Histogram histo = 
response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1)); + + for (Histogram.Bucket bucket : histo.getBuckets()) { + Filters filters = bucket.getAggregations().get("filters"); + assertThat(filters, notNullValue()); + assertThat(filters.getBuckets().size(), equalTo(1)); + Filters.Bucket filterBucket = filters.getBuckets().get(0); + assertEquals(bucket.getDocCount(), filterBucket.getDocCount()); + } + } + ); } public void testWithContextBasedSubAggregation() throws Exception { @@ -236,232 +237,238 @@ public void testWithContextBasedSubAggregation() throws Exception { } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(filters("filters", new KeyedFilter("all", matchAllQuery()))) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Filters filters = bucket.getAggregations().get("filters"); - assertThat(filters, notNullValue()); - Filters.Bucket all = filters.getBucketByKey("all"); - assertThat(all, Matchers.notNullValue()); - assertThat(all.getKeyAsString(), equalTo("all")); - assertThat(all.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(filters("filters", new KeyedFilter("all", matchAllQuery()))) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Filters filters = bucket.getAggregations().get("filters"); + assertThat(filters, notNullValue()); + Filters.Bucket all = filters.getBucketByKey("all"); + assertThat(all, Matchers.notNullValue()); + assertThat(all.getKeyAsString(), equalTo("all")); + assertThat(all.getDocCount(), is(0L)); + } + ); } public void testSimpleNonKeyed() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2"))) - .get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2"))), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); - assertThat(filters.getBuckets().size(), equalTo(2)); + assertThat(filters.getBuckets().size(), equalTo(2)); - Collection<? extends Filters.Bucket> buckets = filters.getBuckets(); - Iterator<? extends Filters.Bucket> itr = buckets.iterator(); + Collection<? extends Filters.Bucket> buckets = filters.getBuckets(); + Iterator<? extends Filters.Bucket> itr = buckets.iterator(); - Filters.Bucket bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + 
Filters.Bucket bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + } + ); } public void testOtherBucket() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ).otherBucket(true) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(3)); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - - bucket = filters.getBucketByKey("_other_"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(3)); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + + bucket = filters.getBucketByKey("_other_"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + } + ); } public void testOtherNamedBucket() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ).otherBucket(true).otherBucketKey("foobar") - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(3)); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - - bucket = filters.getBucketByKey("foobar"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new 
KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true).otherBucketKey("foobar") + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(3)); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + + bucket = filters.getBucketByKey("foobar"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + } + ); } public void testOtherNonKeyed() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2")).otherBucket(true) - ).get(); - - assertNoFailures(response); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2")).otherBucket(true)), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); + assertThat(filters.getBuckets().size(), equalTo(3)); - assertThat(filters.getBuckets().size(), equalTo(3)); + Collection buckets = filters.getBuckets(); + Iterator itr = buckets.iterator(); - Collection buckets = filters.getBuckets(); - Iterator itr = buckets.iterator(); + Filters.Bucket bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - Filters.Bucket bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - - bucket = itr.next(); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + bucket = itr.next(); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + } + ); } public void testOtherWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - filters( - "tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) - ).otherBucket(true).subAggregation(avg("avg_value").field("value")) - ).get(); - - assertNoFailures(response); - - Filters filters = response.getAggregations().get("tags"); - assertThat(filters, notNullValue()); - assertThat(filters.getName(), equalTo("tags")); - - assertThat(filters.getBuckets().size(), equalTo(3)); - assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) 
filters).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); - - Filters.Bucket bucket = filters.getBucketByKey("tag1"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); - long sum = 0; - for (int i = 0; i < numTag1Docs; ++i) { - sum += i + 1; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Avg avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); - assertThat(propertiesKeys[0], equalTo("tag1")); - assertThat(propertiesDocCounts[0], equalTo((long) numTag1Docs)); - assertThat(propertiesCounts[0], equalTo((double) sum / numTag1Docs)); - - bucket = filters.getBucketByKey("tag2"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); - sum = 0; - for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); ++i) { - sum += i; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs)); - assertThat(propertiesKeys[1], equalTo("tag2")); - assertThat(propertiesDocCounts[1], equalTo((long) numTag2Docs)); - assertThat(propertiesCounts[1], equalTo((double) sum / numTag2Docs)); - - bucket = filters.getBucketByKey("_other_"); - assertThat(bucket, Matchers.notNullValue()); - assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); - sum = 0; - for (int i = numTag1Docs + numTag2Docs; i < numDocs; ++i) { - sum += i; - } - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - avgValue = bucket.getAggregations().get("avg_value"); - assertThat(avgValue, notNullValue()); - assertThat(avgValue.getName(), equalTo("avg_value")); - assertThat(avgValue.getValue(), equalTo((double) sum / numOtherDocs)); - assertThat(propertiesKeys[2], equalTo("_other_")); - assertThat(propertiesDocCounts[2], equalTo((long) numOtherDocs)); - assertThat(propertiesCounts[2], equalTo((double) sum / numOtherDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true).subAggregation(avg("avg_value").field("value")) + ), + response -> { + Filters filters = response.getAggregations().get("tags"); + assertThat(filters, notNullValue()); + assertThat(filters.getName(), equalTo("tags")); + + assertThat(filters.getBuckets().size(), equalTo(3)); + assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); + + Filters.Bucket bucket = filters.getBucketByKey("tag1"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs)); + long sum = 0; + for (int i = 0; i < numTag1Docs; ++i) { + sum += i + 1; + } + assertThat(bucket.getAggregations().asList().isEmpty(), 
is(false)); + Avg avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); + assertThat(propertiesKeys[0], equalTo("tag1")); + assertThat(propertiesDocCounts[0], equalTo((long) numTag1Docs)); + assertThat(propertiesCounts[0], equalTo((double) sum / numTag1Docs)); + + bucket = filters.getBucketByKey("tag2"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs)); + sum = 0; + for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); ++i) { + sum += i; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs)); + assertThat(propertiesKeys[1], equalTo("tag2")); + assertThat(propertiesDocCounts[1], equalTo((long) numTag2Docs)); + assertThat(propertiesCounts[1], equalTo((double) sum / numTag2Docs)); + + bucket = filters.getBucketByKey("_other_"); + assertThat(bucket, Matchers.notNullValue()); + assertThat(bucket.getDocCount(), equalTo((long) numOtherDocs)); + sum = 0; + for (int i = numTag1Docs + numTag2Docs; i < numDocs; ++i) { + sum += i; + } + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + avgValue = bucket.getAggregations().get("avg_value"); + assertThat(avgValue, notNullValue()); + assertThat(avgValue.getName(), equalTo("avg_value")); + assertThat(avgValue.getValue(), equalTo((double) sum / numOtherDocs)); + assertThat(propertiesKeys[2], equalTo("_other_")); + assertThat(propertiesDocCounts[2], equalTo((long) numOtherDocs)); + assertThat(propertiesCounts[2], equalTo((double) sum / numOtherDocs)); + } + ); } public void testEmptyAggregationWithOtherBucket() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(filters("filters", new KeyedFilter("foo", matchAllQuery())).otherBucket(true).otherBucketKey("bar")) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Filters filters = bucket.getAggregations().get("filters"); - assertThat(filters, notNullValue()); - - Filters.Bucket other = filters.getBucketByKey("bar"); - assertThat(other, Matchers.notNullValue()); - assertThat(other.getKeyAsString(), equalTo("bar")); - assertThat(other.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(filters("filters", new KeyedFilter("foo", matchAllQuery())).otherBucket(true).otherBucketKey("bar")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Filters filters = bucket.getAggregations().get("filters"); 
+ assertThat(filters, notNullValue()); + + Filters.Bucket other = filters.getBucketByKey("bar"); + assertThat(other, Matchers.notNullValue()); + assertThat(other.getKeyAsString(), equalTo("bar")); + assertThat(other.getDocCount(), is(0L)); + } + ); } private static KeyedFilter[] randomOrder(KeyedFilter... filters) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 7639445f1f5ac..0ed83f73e418d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; @@ -39,7 +38,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.geoDistance; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -66,7 +65,7 @@ private IndexRequestBuilder indexCity(String idx, String name, String... 
latLons } source.endArray(); source = source.endObject(); - return client().prepareIndex(idx).setSource(source); + return prepareIndex(idx).setSource(source); } @Override @@ -121,8 +120,7 @@ public void setupSuiteScopeCluster() throws Exception { List<IndexRequestBuilder> builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field("value", i * 2).field("location", "52.0945, 5.116").endObject()) ); } @@ -142,292 +140,298 @@ public void testSimple() throws Exception { for (Consumer<GeoDistanceAggregationBuilder> range : ranges) { range.accept(builder); } - SearchResponse response = prepareSearch("idx").addAggregation(builder).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List<? extends Range.Bucket> buckets = geoDist.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(builder), response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List<? extends Range.Bucket> buckets = geoDist.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + 
assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + }); } public void testSimpleWithCustomKeys() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo("ring1", 500) - .addRange("ring2", 500, 1000) - .addUnboundedFrom("ring3", 1000) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List<? extends Range.Bucket> buckets = geoDist.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("ring1")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("ring2")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("ring3")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo("ring1", 500) + .addRange("ring2", 500, 1000) + .addUnboundedFrom("ring3", 1000) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List<? extends Range.Bucket> buckets = geoDist.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("ring1")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) 
bucket.getKey(), equalTo("ring2")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("ring3")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } public void testUnmapped() throws Exception { clusterAdmin().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List<? extends Range.Bucket> buckets = geoDist.getBuckets(); - assertThat(geoDist.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List<? extends Range.Bucket> buckets = geoDist.getBuckets(); + assertThat(geoDist.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) 
bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(0L)); + } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(geoDist.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + 
assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List buckets = geoDist.getBuckets(); + assertThat(geoDist.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } public void testWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - .subAggregation(terms("cities").field("city").collectMode(randomFrom(SubAggCollectionMode.values()))) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(geoDist.getBuckets().size(), equalTo(3)); - assertThat(((InternalAggregation) geoDist).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoDist).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoDist).getProperty("_count"); - Object[] propertiesCities = (Object[]) ((InternalAggregation) geoDist).getProperty("cities"); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Terms cities = bucket.getAggregations().get("cities"); - assertThat(cities, Matchers.notNullValue()); - Set names = new HashSet<>(); - for (Terms.Bucket city : cities.getBuckets()) { - names.add(city.getKeyAsString()); - } - assertThat(names.contains("utrecht") && names.contains("haarlem"), is(true)); - assertThat((String) propertiesKeys[0], equalTo("*-500.0")); - assertThat((long) propertiesDocCounts[0], equalTo(2L)); - assertThat((Terms) 
propertiesCities[0], sameInstance(cities)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - cities = bucket.getAggregations().get("cities"); - assertThat(cities, Matchers.notNullValue()); - names = new HashSet<>(); - for (Terms.Bucket city : cities.getBuckets()) { - names.add(city.getKeyAsString()); - } - assertThat(names.contains("berlin") && names.contains("prague"), is(true)); - assertThat((String) propertiesKeys[1], equalTo("500.0-1000.0")); - assertThat((long) propertiesDocCounts[1], equalTo(2L)); - assertThat((Terms) propertiesCities[1], sameInstance(cities)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - cities = bucket.getAggregations().get("cities"); - assertThat(cities, Matchers.notNullValue()); - names = new HashSet<>(); - for (Terms.Bucket city : cities.getBuckets()) { - names.add(city.getKeyAsString()); - } - assertThat(names.contains("tel-aviv"), is(true)); - assertThat((String) propertiesKeys[2], equalTo("1000.0-*")); - assertThat((long) propertiesDocCounts[2], equalTo(1L)); - assertThat((Terms) propertiesCities[2], sameInstance(cities)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + .subAggregation(terms("cities").field("city").collectMode(randomFrom(SubAggCollectionMode.values()))) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List buckets = geoDist.getBuckets(); + assertThat(geoDist.getBuckets().size(), equalTo(3)); + assertThat(((InternalAggregation) geoDist).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoDist).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoDist).getProperty("_count"); + Object[] propertiesCities = (Object[]) ((InternalAggregation) geoDist).getProperty("cities"); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Terms 
cities = bucket.getAggregations().get("cities"); + assertThat(cities, Matchers.notNullValue()); + Set names = new HashSet<>(); + for (Terms.Bucket city : cities.getBuckets()) { + names.add(city.getKeyAsString()); + } + assertThat(names.contains("utrecht") && names.contains("haarlem"), is(true)); + assertThat((String) propertiesKeys[0], equalTo("*-500.0")); + assertThat((long) propertiesDocCounts[0], equalTo(2L)); + assertThat((Terms) propertiesCities[0], sameInstance(cities)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + cities = bucket.getAggregations().get("cities"); + assertThat(cities, Matchers.notNullValue()); + names = new HashSet<>(); + for (Terms.Bucket city : cities.getBuckets()) { + names.add(city.getKeyAsString()); + } + assertThat(names.contains("berlin") && names.contains("prague"), is(true)); + assertThat((String) propertiesKeys[1], equalTo("500.0-1000.0")); + assertThat((long) propertiesDocCounts[1], equalTo(2L)); + assertThat((Terms) propertiesCities[1], sameInstance(cities)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + cities = bucket.getAggregations().get("cities"); + assertThat(cities, Matchers.notNullValue()); + names = new HashSet<>(); + for (Terms.Bucket city : cities.getBuckets()) { + names.add(city.getKeyAsString()); + } + assertThat(names.contains("tel-aviv"), is(true)); + assertThat((String) propertiesKeys[2], equalTo("1000.0-*")); + assertThat((long) propertiesDocCounts[2], equalTo(1L)); + assertThat((Terms) propertiesCities[2], sameInstance(cities)); + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)).field("location").addRange("0-100", 0.0, 100.0)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Range geoDistance = bucket.getAggregations().get("geo_dist"); - // TODO: use diamond once JI-9019884 is fixed - List buckets = new ArrayList<>(geoDistance.getBuckets()); - assertThat(geoDistance, Matchers.notNullValue()); - assertThat(geoDistance.getName(), equalTo("geo_dist")); - assertThat(buckets.size(), is(1)); - assertThat((String) buckets.get(0).getKey(), equalTo("0-100")); - assertThat(((Number) 
buckets.get(0).getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(100.0)); - assertThat(buckets.get(0).getFromAsString(), equalTo("0.0")); - assertThat(buckets.get(0).getToAsString(), equalTo("100.0")); - assertThat(buckets.get(0).getDocCount(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)).field("location").addRange("0-100", 0.0, 100.0) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Range geoDistance = bucket.getAggregations().get("geo_dist"); + // TODO: use diamond once JI-9019884 is fixed + List buckets = new ArrayList<>(geoDistance.getBuckets()); + assertThat(geoDistance, Matchers.notNullValue()); + assertThat(geoDistance.getName(), equalTo("geo_dist")); + assertThat(buckets.size(), is(1)); + assertThat((String) buckets.get(0).getKey(), equalTo("0-100")); + assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(100.0)); + assertThat(buckets.get(0).getFromAsString(), equalTo("0.0")); + assertThat(buckets.get(0).getToAsString(), equalTo("100.0")); + assertThat(buckets.get(0).getDocCount(), equalTo(0L)); + } + ); } public void testNoRangesInQuery() { @@ -442,49 +446,50 @@ public void testNoRangesInQuery() { } public void testMultiValues() throws Exception { - SearchResponse response = prepareSearch("idx-multi").addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - ).get(); - - assertNoFailures(response); - - Range geoDist = response.getAggregations().get("amsterdam_rings"); - assertThat(geoDist, notNullValue()); - assertThat(geoDist.getName(), equalTo("amsterdam_rings")); - List buckets = geoDist.getBuckets(); - assertThat(geoDist.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-500.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); - assertThat(bucket.getFromAsString(), equalTo("0.0")); - assertThat(bucket.getToAsString(), equalTo("500.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); - assertThat(bucket.getFromAsString(), equalTo("500.0")); - assertThat(bucket.getToAsString(), equalTo("1000.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("1000.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); - assertThat(((Number) 
bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("1000.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx-multi").addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ), + response -> { + Range geoDist = response.getAggregations().get("amsterdam_rings"); + assertThat(geoDist, notNullValue()); + assertThat(geoDist.getName(), equalTo("amsterdam_rings")); + List buckets = geoDist.getBuckets(); + assertThat(geoDist.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("*-500.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0)); + assertThat(bucket.getFromAsString(), equalTo("0.0")); + assertThat(bucket.getToAsString(), equalTo("500.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0)); + assertThat(bucket.getFromAsString(), equalTo("500.0")); + assertThat(bucket.getToAsString(), equalTo("1000.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("1000.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("1000.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 1cd8d5bc2fc3d..57b11df3b7d31 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; @@ -36,7 +35,7 @@ import static org.elasticsearch.geometry.utils.Geohash.stringEncode; import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static 
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -63,7 +62,7 @@ private static IndexRequestBuilder indexCity(String index, String name, List buckets = geoGrid.getBuckets(); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoGrid).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoGrid).getProperty("_count"); - for (int i = 0; i < buckets.size(); i++) { - GeoGrid.Bucket cell = buckets.get(i); - String geohash = cell.getKeyAsString(); - - long bucketCount = cell.getDocCount(); - int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); - assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - GeoPoint geoPoint = (GeoPoint) propertiesKeys[i]; - assertThat(stringEncode(geoPoint.lon(), geoPoint.lat(), precision), equalTo(geohash)); - assertThat((long) propertiesDocCounts[i], equalTo(bucketCount)); - } + final int finalPrecision = precision; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + List buckets = geoGrid.getBuckets(); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoGrid).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoGrid).getProperty("_count"); + for (int i = 0; i < buckets.size(); i++) { + GeoGrid.Bucket cell = buckets.get(i); + String geohash = cell.getKeyAsString(); + + long bucketCount = cell.getDocCount(); + int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); + assertNotSame(bucketCount, 0); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); + GeoPoint geoPoint = (GeoPoint) propertiesKeys[i]; + assertThat(stringEncode(geoPoint.lon(), geoPoint.lat(), finalPrecision), equalTo(geohash)); + assertThat((long) propertiesDocCounts[i], equalTo(bucketCount)); + } + } + ); } } public void testMultivalued() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("multi_valued_idx").addAggregation( - geohashGrid("geohashgrid").field("location").precision(precision) - ).get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { - String geohash = cell.getKeyAsString(); - - long bucketCount = cell.getDocCount(); - int expectedBucketCount = multiValuedExpectedDocCountsForGeoHash.get(geohash); - assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - } + assertNoFailuresAndResponse( + prepareSearch("multi_valued_idx").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { + String geohash = cell.getKeyAsString(); + + long bucketCount = cell.getDocCount(); + int expectedBucketCount = multiValuedExpectedDocCountsForGeoHash.get(geohash); + assertNotSame(bucketCount, 0); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); + } + } + ); } } @@ -180,85 +179,85 @@ public void testFiltered() throws Exception { GeoBoundingBoxQueryBuilder bbox = new GeoBoundingBoxQueryBuilder("location"); 
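/*
 * NOTE (editorial sketch, not part of the upstream patch): the hunks in these
 * test classes all apply the same mechanical refactoring. Code of the shape
 *
 *     SearchResponse response = prepareSearch("idx").addAggregation(agg).get();
 *     assertNoFailures(response);
 *     // ... assertions against response ...
 *
 * becomes
 *
 *     assertNoFailuresAndResponse(prepareSearch("idx").addAggregation(agg), response -> {
 *         // ... assertions against response ...
 *     });
 *
 * assertNoFailuresAndResponse checks the response for shard failures before
 * invoking the consumer, and presumably also releases the response once the
 * consumer returns, which would explain why these tests no longer hold a
 * SearchResponse reference directly. Because a lambda may only capture
 * effectively final locals, loop variables that are mutated (such as
 * precision) are first copied into a final local; see the
 * `final int finalPrecision = precision` lines introduced above.
 */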
bbox.setCorners(smallestGeoHash).queryName("bbox"); for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx").addAggregation( - AggregationBuilders.filter("filtered", bbox) - .subAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) - ).get(); - - assertNoFailures(response); - - Filter filter = response.getAggregations().get("filtered"); - - GeoGrid geoGrid = filter.getAggregations().get("geohashgrid"); - for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { - String geohash = cell.getKeyAsString(); - long bucketCount = cell.getDocCount(); - int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); - assertNotSame(bucketCount, 0); - assertTrue("Buckets must be filtered", geohash.startsWith(smallestGeoHash)); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + AggregationBuilders.filter("filtered", bbox) + .subAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + ), + response -> { + Filter filter = response.getAggregations().get("filtered"); + + GeoGrid geoGrid = filter.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { + String geohash = cell.getKeyAsString(); + long bucketCount = cell.getDocCount(); + int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); + assertNotSame(bucketCount, 0); + assertTrue("Buckets must be filtered", geohash.startsWith(smallestGeoHash)); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); + } + } + ); } } public void testUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - geohashGrid("geohashgrid").field("location").precision(precision) - ).get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - assertThat(geoGrid.getBuckets().size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + assertThat(geoGrid.getBuckets().size(), equalTo(0)); + } + ); } } public void testPartiallyUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - geohashGrid("geohashgrid").field("location").precision(precision) - ).get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { - String geohash = cell.getKeyAsString(); - - long bucketCount = cell.getDocCount(); - int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); - assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - } + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { + String geohash = cell.getKeyAsString(); + + long bucketCount = cell.getDocCount(); + int expectedBucketCount = 
expectedDocCountsForGeoHash.get(geohash); + assertNotSame(bucketCount, 0); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); + } + } + ); } } public void testTopMatch() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { - SearchResponse response = prepareSearch("idx").addAggregation( - geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision) - ).get(); - - assertNoFailures(response); - - GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - // Check we only have one bucket with the best match for that resolution - assertThat(geoGrid.getBuckets().size(), equalTo(1)); - for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { - String geohash = cell.getKeyAsString(); - long bucketCount = cell.getDocCount(); - int expectedBucketCount = 0; - for (var entry : expectedDocCountsForGeoHash.entrySet()) { - if (entry.getKey().length() == precision) { - expectedBucketCount = Math.max(expectedBucketCount, entry.getValue()); + final int finalPrecision = precision; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision) + ), + response -> { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + // Check we only have one bucket with the best match for that resolution + assertThat(geoGrid.getBuckets().size(), equalTo(1)); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { + String geohash = cell.getKeyAsString(); + long bucketCount = cell.getDocCount(); + int expectedBucketCount = 0; + for (var entry : expectedDocCountsForGeoHash.entrySet()) { + if (entry.getKey().length() == finalPrecision) { + expectedBucketCount = Math.max(expectedBucketCount, entry.getValue()); + } + } + assertNotSame(bucketCount, 0); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); } } - assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); - } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java index 347b2324027c0..abe93597f02b9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -21,7 +20,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -41,15 +40,13 @@ public void setupSuiteScopeCluster() throws Exception { numDocs = randomIntBetween(3, 20); for 
(int i = 0; i < numDocs / 2; i++) { builders.add( - client().prepareIndex("idx") - .setId("" + i + 1) + prepareIndex("idx").setId("" + i + 1) .setSource(jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject()) ); } for (int i = numDocs / 2; i < numDocs; i++) { builders.add( - client().prepareIndex("idx") - .setId("" + i + 1) + prepareIndex("idx").setId("" + i + 1) .setSource( jsonBuilder().startObject().field("value", i + 1).field("tag", "tag2").field("name", "name" + i + 1).endObject() ) @@ -60,32 +57,32 @@ public void setupSuiteScopeCluster() throws Exception { } public void testWithStatsSubAggregator() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.termQuery("tag", "tag1")) - .addAggregation(global("global").subAggregation(stats("value_stats").field("value"))) - .get(); - - assertNoFailures(response); - - Global global = response.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo((long) numDocs)); - assertThat((long) ((InternalAggregation) global).getProperty("_count"), equalTo((long) numDocs)); - assertThat(global.getAggregations().asList().isEmpty(), is(false)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.termQuery("tag", "tag1")) + .addAggregation(global("global").subAggregation(stats("value_stats").field("value"))), + response -> { + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo((long) numDocs)); + assertThat((long) ((InternalAggregation) global).getProperty("_count"), equalTo((long) numDocs)); + assertThat(global.getAggregations().asList().isEmpty(), is(false)); - Stats stats = global.getAggregations().get("value_stats"); - assertThat((Stats) ((InternalAggregation) global).getProperty("value_stats"), sameInstance(stats)); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("value_stats")); - long sum = 0; - for (int i = 0; i < numDocs; ++i) { - sum += i + 1; - } - assertThat(stats.getAvg(), equalTo((double) sum / numDocs)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo((double) numDocs)); - assertThat(stats.getCount(), equalTo((long) numDocs)); - assertThat(stats.getSum(), equalTo((double) sum)); + Stats stats = global.getAggregations().get("value_stats"); + assertThat((Stats) ((InternalAggregation) global).getProperty("value_stats"), sameInstance(stats)); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("value_stats")); + long sum = 0; + for (int i = 0; i < numDocs; ++i) { + sum += i + 1; + } + assertThat(stats.getAvg(), equalTo((double) sum / numDocs)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo((double) numDocs)); + assertThat(stats.getCount(), equalTo((long) numDocs)); + assertThat(stats.getSum(), equalTo((double) sum)); + } + ); } public void testNonTopLevel() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 07b678e89c024..e5d13627e1da0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -52,6 +51,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -140,18 +140,17 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, i + 1) - .startArray(MULTI_VALUED_FIELD_NAME) - .value(i + 1) - .value(i + 2) - .endArray() - .field("tag", "tag" + i) - .field("constant", 1) - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i + 1) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i + 1) + .value(i + 2) + .endArray() + .field("tag", "tag" + i) + .field("constant", 1) + .endObject() + ) ); } @@ -160,8 +159,7 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) ); } @@ -191,93 +189,105 @@ private void getMultiSortDocs(List builders) throws IOExcep assertAcked(indicesAdmin().prepareCreate("sort_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=double").get()); for (int i = 1; i <= 3; i++) { builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject() + ) ); } builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3.8).field("l", 3).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + 
jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3.8).field("l", 3).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4.4).field("l", 3).field("d", 3).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4.4).field("l", 3).field("d", 3).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5.1).field("l", 5).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5.1).field("l", 5).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject() + ) ); } public void testSingleValuedField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) - .get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void singleValuedField_withOffset() throws Exception { int interval1 = 10; int 
offset = 5; - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval1).offset(offset) - ).get(); - - // from setup we have between 6 and 20 documents, each with value 1 in test field - int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); - - // first bucket should start at -5, contain 4 documents - Histogram.Bucket bucket = histo.getBuckets().get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(-5L)); - assertThat(bucket.getDocCount(), equalTo(4L)); - - // last bucket should have (numDocs % interval + 1) docs - bucket = histo.getBuckets().get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(numDocs % interval1 + 5L)); - assertThat(bucket.getDocCount(), equalTo((numDocs % interval) + 1L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval1).offset(offset)), + response -> { + + // from setup we have between 6 and 20 documents, each with value 1 in test field + int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); + + // first bucket should start at -5, contain 4 documents + Histogram.Bucket bucket = histo.getBuckets().get(0); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(-5L)); + assertThat(bucket.getDocCount(), equalTo(4L)); + + // last bucket should have (numDocs % interval + 1) docs + bucket = histo.getBuckets().get(expectedNumberOfBuckets - 1); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(numDocs % interval1 + 5L)); + assertThat(bucket.getDocCount(), equalTo((numDocs % interval) + 1L)); + } + ); } /** @@ -286,352 +296,365 @@ public void singleValuedField_withOffset() throws Exception { */ public void testSingleValuedFieldWithRandomOffset() throws Exception { int offset = randomIntBetween(2, interval); - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).offset(offset) - ).get(); - assertNoFailures(response); - // shifting by offset>2 creates new extra bucket [0,offset-1] - // if offset is >= number of values in original last bucket, that effect is canceled - int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ?
numValueBuckets : numValueBuckets + 1; - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); - - long docsCounted = 0; - for (int i = 0; i < expectedNumberOfBuckets; ++i) { - Histogram.Bucket bucket = histo.getBuckets().get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) ((i - 1) * interval + offset))); - if (i == 0) { - // first bucket - long expectedFirstBucketCount = offset - 1; - assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); - docsCounted += expectedFirstBucketCount; - } else if (i < expectedNumberOfBuckets - 1) { - assertThat(bucket.getDocCount(), equalTo((long) interval)); - docsCounted += interval; - } else { - assertThat(bucket.getDocCount(), equalTo((long) numDocs - docsCounted)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).offset(offset)), + response -> { + // shifting by offset>2 creates new extra bucket [0,offset-1] + // if offset is >= number of values in original last bucket, that effect is canceled + int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); + + long docsCounted = 0; + for (int i = 0; i < expectedNumberOfBuckets; ++i) { + Histogram.Bucket bucket = histo.getBuckets().get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) ((i - 1) * interval + offset))); + if (i == 0) { + // first bucket + long expectedFirstBucketCount = offset - 1; + assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); + docsCounted += expectedFirstBucketCount; + } else if (i < expectedNumberOfBuckets - 1) { + assertThat(bucket.getDocCount(), equalTo((long) interval)); + docsCounted += interval; + } else { + assertThat(bucket.getDocCount(), equalTo((long) numDocs - docsCounted)); + } + } } - } + ); } public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(true)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(true)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), 
equalTo(numValueBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testsingleValuedFieldOrderedByKeyDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testSingleValuedFieldOrderedByCountAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(true)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set buckets = new HashSet<>(); - List histoBuckets = new ArrayList<>(histo.getBuckets()); - long previousCount = Long.MIN_VALUE; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = histoBuckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertEquals(0, key % interval); - assertTrue(buckets.add(key)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)])); - assertThat(bucket.getDocCount(), greaterThanOrEqualTo(previousCount)); - previousCount = bucket.getDocCount(); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(true)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set buckets = new HashSet<>(); + List histoBuckets = new 
ArrayList<>(histo.getBuckets()); + long previousCount = Long.MIN_VALUE; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = histoBuckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertEquals(0, key % interval); + assertTrue(buckets.add(key)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)])); + assertThat(bucket.getDocCount(), greaterThanOrEqualTo(previousCount)); + previousCount = bucket.getDocCount(); + } + } + ); } public void testSingleValuedFieldOrderedByCountDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set buckets = new HashSet<>(); - List histoBuckets = new ArrayList<>(histo.getBuckets()); - long previousCount = Long.MAX_VALUE; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = histoBuckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertEquals(0, key % interval); - assertTrue(buckets.add(key)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)])); - assertThat(bucket.getDocCount(), lessThanOrEqualTo(previousCount)); - previousCount = bucket.getDocCount(); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set buckets = new HashSet<>(); + List histoBuckets = new ArrayList<>(histo.getBuckets()); + long previousCount = Long.MAX_VALUE; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = histoBuckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertEquals(0, key % interval); + assertTrue(buckets.add(key)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)])); + assertThat(bucket.getDocCount(), lessThanOrEqualTo(previousCount)); + previousCount = bucket.getDocCount(); + } + } + ); } public void testSingleValuedFieldWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(numValueBuckets)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); - - List buckets = new 
ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == i) { - s += j + 1; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(numValueBuckets)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == i) { + s += j + 1; + } + } + assertThat(sum.value(), equalTo((double) s)); + assertEquals(propertiesKeys[i], (double) i * interval); + assertThat(propertiesDocCounts[i], equalTo(valueCounts[i])); + assertThat(propertiesCounts[i], equalTo((double) s)); } } - assertThat(sum.value(), equalTo((double) s)); - assertEquals(propertiesKeys[i], (double) i * interval); - assertThat(propertiesDocCounts[i], equalTo(valueCounts[i])); - assertThat(propertiesCounts[i], equalTo((double) s)); - } + ); } public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("sum", true)) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set visited = new HashSet<>(); - double previousSum = Double.NEGATIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertTrue(visited.add(key)); - int b = (int) (key / interval); - assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Sum sum 
= bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == b) { - s += j + 1; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("sum", true)) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set visited = new HashSet<>(); + double previousSum = Double.NEGATIVE_INFINITY; + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertTrue(visited.add(key)); + int b = (int) (key / interval); + assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == b) { + s += j + 1; + } + } + assertThat(sum.value(), equalTo((double) s)); + assertThat(sum.value(), greaterThanOrEqualTo(previousSum)); + previousSum = s; } } - assertThat(sum.value(), equalTo((double) s)); - assertThat(sum.value(), greaterThanOrEqualTo(previousSum)); - previousSum = s; - } + ); } public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("sum", false)) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set visited = new HashSet<>(); - double previousSum = Double.POSITIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertTrue(visited.add(key)); - int b = (int) (key / interval); - assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == b) { - s += j + 1; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("sum", false)) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set visited = new HashSet<>(); + double previousSum = Double.POSITIVE_INFINITY; + List buckets = new 
ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertTrue(visited.add(key)); + int b = (int) (key / interval); + assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == b) { + s += j + 1; + } + } + assertThat(sum.value(), equalTo((double) s)); + assertThat(sum.value(), lessThanOrEqualTo(previousSum)); + previousSum = s; } } - assertThat(sum.value(), equalTo((double) s)); - assertThat(sum.value(), lessThanOrEqualTo(previousSum)); - previousSum = s; - } + ); } public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("stats.sum", false)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set visited = new HashSet<>(); - double previousSum = Double.POSITIVE_INFINITY; - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertTrue(visited.add(key)); - int b = (int) (key / interval); - assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == b) { - s += j + 1; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("stats.sum", false)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set visited = new HashSet<>(); + double previousSum = Double.POSITIVE_INFINITY; + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertTrue(visited.add(key)); + int b = (int) (key / interval); + assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == b) { + s += j + 1; + } + } + assertThat(stats.getSum(), equalTo((double) s)); + assertThat(stats.getSum(), lessThanOrEqualTo(previousSum)); + previousSum = s; } } - 
assertThat(stats.getSum(), equalTo((double) s)); - assertThat(stats.getSum(), lessThanOrEqualTo(previousSum)); - previousSum = s; - } + ); } public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("filter>max", asc)) - .subAggregation(filter("filter", matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - Set visited = new HashSet<>(); - double prevMax = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - long key = ((Number) bucket.getKey()).longValue(); - assertTrue(visited.add(key)); - int b = (int) (key / interval); - assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(bucket.getDocCount(), equalTo(filter.getDocCount())); - Max max = filter.getAggregations().get("max"); - assertThat(max, Matchers.notNullValue()); - assertThat(max.value(), asc ? greaterThanOrEqualTo(prevMax) : lessThanOrEqualTo(prevMax)); - prevMax = max.value(); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("filter>max", asc)) + .subAggregation(filter("filter", matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + Set visited = new HashSet<>(); + double prevMax = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + long key = ((Number) bucket.getKey()).longValue(); + assertTrue(visited.add(key)); + int b = (int) (key / interval); + assertThat(bucket.getDocCount(), equalTo(valueCounts[b])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(bucket.getDocCount(), equalTo(filter.getDocCount())); + Max max = filter.getAggregations().get("max"); + assertThat(max, Matchers.notNullValue()); + assertThat(max.value(), asc ? 
greaterThanOrEqualTo(prevMax) : lessThanOrEqualTo(prevMax)); + prevMax = max.value(); + } + } + ); } public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("max_constant", randomBoolean())) - .subAggregation(max("max_constant").field("constant")) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant")) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { @@ -662,243 +685,249 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) - .interval(interval) - ).get(); - - assertNoFailures(response); - - final int numBuckets = (numDocs + 1) / interval - 2 / interval + 1; - final long[] counts = new long[(numDocs + 1) / interval + 1]; - for (int i = 0; i < numDocs; ++i) { - ++counts[(i + 2) / interval]; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(interval) + ), + response -> { + final int numBuckets = (numDocs + 1) / interval - 2 / interval + 1; + final long[] counts = new long[(numDocs + 1) / interval + 1]; + for (int i = 0; i < numDocs; ++i) { + ++counts[(i + 2) / interval]; + } - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numBuckets)); - - for (int i = 0; i < numBuckets; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - int key = ((2 / interval) + i) * interval; - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); 
- assertThat(bucket.getDocCount(), equalTo(counts[key / interval])); - } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numBuckets)); + + for (int i = 0; i < numBuckets; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + int key = ((2 / interval) + i) * interval; + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); + assertThat(bucket.getDocCount(), equalTo(counts[key / interval])); + } + } + ); } public void testMultiValuedField() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval)) - .get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValuesBuckets)); - - for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValuesBuckets)); + + for (int i = 0; i < numValuesBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); + } + } + ); } public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false)) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets)); - - List buckets = new ArrayList<>(histo.getBuckets()); - for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false)) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValuesBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1); + assertThat(bucket, notNullValue()); + 
assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); + } + } + ); } public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field(MULTI_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) - .interval(interval) - ).get(); - - assertNoFailures(response); - - final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1; - final long[] counts = new long[(numDocs + 2) / interval + 1]; - for (int i = 0; i < numDocs; ++i) { - final int bucket1 = (i + 2) / interval; - final int bucket2 = (i + 3) / interval; - ++counts[bucket1]; - if (bucket1 != bucket2) { - ++counts[bucket2]; - } - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(MULTI_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(interval) + ), + response -> { + final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1; + final long[] counts = new long[(numDocs + 2) / interval + 1]; + for (int i = 0; i < numDocs; ++i) { + final int bucket1 = (i + 2) / interval; + final int bucket2 = (i + 3) / interval; + ++counts[bucket1]; + if (bucket1 != bucket2) { + ++counts[bucket2]; + } + } - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numBuckets)); - - for (int i = 0; i < numBuckets; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - int key = ((2 / interval) + i) * interval; - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); - assertThat(bucket.getDocCount(), equalTo(counts[key / interval])); - } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numBuckets)); + + for (int i = 0; i < numBuckets; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + int key = ((2 / interval) + i) * interval; + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); + assertThat(bucket.getDocCount(), equalTo(counts[key / interval])); + } + } + ); } public void testScriptSingleValue() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_value'].value", emptyMap())) - .interval(interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_value'].value", 
emptyMap())) + .interval(interval) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testScriptMultiValued() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_values']", emptyMap())) - .interval(interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValuesBuckets)); - - for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_values']", emptyMap())) + .interval(interval) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValuesBuckets)); + + for (int i = 0; i < numValuesBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); + } + } + ); } public void testUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - assertThat(histo.getBuckets().size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(0)); + } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = 
buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + ); } public void testPartiallyUnmappedWithExtendedBounds() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(-1 * 2 * interval, valueCounts.length * interval) - ).get(); - - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets + 3)); - - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * 2 * interval)); - assertThat(bucket.getDocCount(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * interval)); - assertThat(bucket.getDocCount(), equalTo(0L)); - - for (int i = 2; i < numValueBuckets + 2; ++i) { - bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) (i - 2) * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i - 2])); - } + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(-1 * 2 * interval, valueCounts.length * interval) + ), + response -> { + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets + 3)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * 2 * interval)); + assertThat(bucket.getDocCount(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * interval)); + assertThat(bucket.getDocCount(), equalTo(0L)); + + for (int i = 2; i < numValueBuckets + 2; ++i) { + bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) (i - 2) * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i - 2])); + } + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - 
.addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(1L) - .minDocCount(0) - .subAggregation(histogram("sub_histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - List buckets = histo.getBuckets(); - Histogram.Bucket bucket = buckets.get(1); - assertThat(bucket, Matchers.notNullValue()); - - histo = bucket.getAggregations().get("sub_histo"); - assertThat(histo, Matchers.notNullValue()); - assertThat(histo.getName(), equalTo("sub_histo")); - assertThat(histo.getBuckets().isEmpty(), is(true)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1L) + .minDocCount(0) + .subAggregation(histogram("sub_histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + List buckets = histo.getBuckets(); + Histogram.Bucket bucket = buckets.get(1); + assertThat(bucket, Matchers.notNullValue()); + + histo = bucket.getAggregations().get("sub_histo"); + assertThat(histo, Matchers.notNullValue()); + assertThat(histo.getName(), equalTo("sub_histo")); + assertThat(histo.getBuckets().isEmpty(), is(true)); + } + ); } public void testSingleValuedFieldWithExtendedBounds() throws Exception { @@ -934,18 +963,35 @@ public void testSingleValuedFieldWithExtendedBounds() throws Exception { int bucketsCount = numValueBuckets + addedBucketsLeft + addedBucketsRight; long[] extendedValueCounts = new long[bucketsCount]; System.arraycopy(valueCounts, 0, extendedValueCounts, addedBucketsLeft, valueCounts.length); - - SearchResponse response = null; + final long startKey = Math.min(boundsMinKey, 0); try { - response = prepareSearch("idx").addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0).extendedBounds(boundsMin, boundsMax) - ).get(); - - if (invalidBoundsError) { - fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); - return; - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .minDocCount(0) + .extendedBounds(boundsMin, boundsMax) + ), + response -> { + if (invalidBoundsError) { + fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); + } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(bucketsCount)); + + long key = startKey; + for (int i = 0; i < bucketsCount; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); + key += interval; + } + } + ); } catch (IllegalArgumentException e) { if (invalidBoundsError) { // expected @@ -954,22 +1000,6 @@ public void testSingleValuedFieldWithExtendedBounds() throws Exception { throw e; } } - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - 
assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(bucketsCount)); - - long key = Math.min(boundsMinKey, 0); - for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); - key += interval; - } } public void testEmptyWithExtendedBounds() throws Exception { @@ -1005,47 +1035,42 @@ public void testEmptyWithExtendedBounds() throws Exception { int bucketsCount = (int) ((boundsMaxKey - boundsMinKey) / interval) + 1; long[] extendedValueCounts = new long[valueCounts.length + addedBucketsLeft + addedBucketsRight]; System.arraycopy(valueCounts, 0, extendedValueCounts, addedBucketsLeft, valueCounts.length); - - SearchResponse response = null; + final long startKey = boundsMinKey; try { - response = prepareSearch("idx").setQuery(QueryBuilders.termQuery("foo", "bar")) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .minDocCount(0) - .extendedBounds(boundsMin, boundsMax) - ) - .get(); - - if (invalidBoundsError) { - fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); - return; - } - + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.termQuery("foo", "bar")) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .minDocCount(0) + .extendedBounds(boundsMin, boundsMax) + ), + response -> { + if (invalidBoundsError) { + fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); + + } + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(bucketsCount)); + + long key = startKey; + for (int i = 0; i < bucketsCount; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0L)); + key += interval; + } + } + ); } catch (IllegalArgumentException e) { - if (invalidBoundsError) { - // expected - return; - } else { + if (invalidBoundsError == false) { throw e; } } - assertNoFailures(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(bucketsCount)); - - long key = boundsMinKey; - for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0L)); - key += interval; - } } /** @@ -1065,20 +1090,22 @@ public void testDecimalIntervalAndOffset() throws Exception { assertAcked(prepareCreate("decimal_values").setMapping("d", "type=float").get()); indexRandom( true, - client().prepareIndex("decimal_values").setId("1").setSource("d", -0.6), - client().prepareIndex("decimal_values").setId("2").setSource("d", 0.1) + prepareIndex("decimal_values").setId("1").setSource("d", -0.6), + prepareIndex("decimal_values").setId("2").setSource("d", 0.1) ); - SearchResponse r = 
prepareSearch("decimal_values").addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)).get(); - assertNoFailures(r); - - Histogram histogram = r.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertEquals(2, buckets.size()); - assertEquals(-0.65, (double) buckets.get(0).getKey(), 0.01d); - assertEquals(1, buckets.get(0).getDocCount()); - assertEquals(0.05, (double) buckets.get(1).getKey(), 0.01d); - assertEquals(1, buckets.get(1).getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("decimal_values").addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + assertEquals(-0.65, (double) buckets.get(0).getKey(), 0.01d); + assertEquals(1, buckets.get(0).getDocCount()); + assertEquals(0.05, (double) buckets.get(1).getKey(), 0.01d); + assertEquals(1, buckets.get(1).getDocCount()); + } + ); } /** @@ -1092,8 +1119,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("d", -0.6), - client().prepareIndex("cache_test_idx").setId("2").setSource("d", 0.1) + prepareIndex("cache_test_idx").setId("1").setSource("d", -0.6), + prepareIndex("cache_test_idx").setId("2").setSource("d", 0.1) ); // Make sure we are starting with a clear cache @@ -1107,15 +1134,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - histogram("histo").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", emptyMap())) - .interval(0.7) - .offset(0.05) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + histogram("histo").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", emptyMap())) + .interval(0.7) + .offset(0.05) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1127,15 +1154,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - histogram("histo").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) - .interval(0.7) - .offset(0.05) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + histogram("histo").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(0.7) + .offset(0.05) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1147,8 +1174,9 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)).get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)) + ); assertThat( 
indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1222,69 +1250,72 @@ public void testHardBounds() throws Exception { assertAcked(prepareCreate("test").setMapping("d", "type=double").get()); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("d", -0.6), - client().prepareIndex("test").setId("2").setSource("d", 0.5), - client().prepareIndex("test").setId("3").setSource("d", 0.1) + prepareIndex("test").setId("1").setSource("d", -0.6), + prepareIndex("test").setId("2").setSource("d", 0.5), + prepareIndex("test").setId("3").setSource("d", 0.1) ); - SearchResponse r = prepareSearch("test").addAggregation( - histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, null)) - ).get(); - assertNoFailures(r); - - Histogram histogram = r.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); - assertEquals(0.5, (double) buckets.get(4).getKey(), 0.01d); - - r = prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(null, 0.0))).get(); - assertNoFailures(r); - - histogram = r.getAggregations().get("histo"); - buckets = histogram.getBuckets(); - assertEquals(1, buckets.size()); - assertEquals(-0.6, (double) buckets.get(0).getKey(), 0.01d); - - r = prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, 0.3))).get(); - assertNoFailures(r); - - histogram = r.getAggregations().get("histo"); - buckets = histogram.getBuckets(); - assertEquals(1, buckets.size()); - assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); + assertNoFailuresAndResponse( + prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, null))), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); + assertEquals(0.5, (double) buckets.get(4).getKey(), 0.01d); + } + ); + assertNoFailuresAndResponse( + prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(null, 0.0))), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(1, buckets.size()); + assertEquals(-0.6, (double) buckets.get(0).getKey(), 0.01d); + } + ); + assertNoFailuresAndResponse( + prepareSearch("test").addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, 0.3))), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(1, buckets.size()); + assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); + } + ); } private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... 
order) {
-        SearchResponse response = prepareSearch("sort_idx").addAggregation(
-            histogram("histo").field(SINGLE_VALUED_FIELD_NAME)
-                .interval(1)
-                .order(BucketOrder.compound(order))
-                .subAggregation(avg("avg_l").field("l"))
-                .subAggregation(sum("sum_d").field("d"))
-        ).get();
-
-        assertNoFailures(response);
-
-        Histogram histogram = response.getAggregations().get("histo");
-        assertThat(histogram, notNullValue());
-        assertThat(histogram.getName(), equalTo("histo"));
-        assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length));
-
-        int i = 0;
-        for (Histogram.Bucket bucket : histogram.getBuckets()) {
-            assertThat(bucket, notNullValue());
-            assertThat(key(bucket), equalTo(expectedKeys[i]));
-            assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
-            Avg avg = bucket.getAggregations().get("avg_l");
-            assertThat(avg, notNullValue());
-            assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
-            Sum sum = bucket.getAggregations().get("sum_d");
-            assertThat(sum, notNullValue());
-            assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
-            i++;
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("sort_idx").addAggregation(
+                histogram("histo").field(SINGLE_VALUED_FIELD_NAME)
+                    .interval(1)
+                    .order(BucketOrder.compound(order))
+                    .subAggregation(avg("avg_l").field("l"))
+                    .subAggregation(sum("sum_d").field("d"))
+            ),
+            response -> {
+                Histogram histogram = response.getAggregations().get("histo");
+                assertThat(histogram, notNullValue());
+                assertThat(histogram.getName(), equalTo("histo"));
+                assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length));
+
+                int i = 0;
+                for (Histogram.Bucket bucket : histogram.getBuckets()) {
+                    assertThat(bucket, notNullValue());
+                    assertThat(key(bucket), equalTo(expectedKeys[i]));
+                    assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
+                    Avg avg = bucket.getAggregations().get("avg_l");
+                    assertThat(avg, notNullValue());
+                    assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
+                    Sum sum = bucket.getAggregations().get("sum_d");
+                    assertThat(sum, notNullValue());
+                    assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
+                    i++;
+                }
+            }
+        );
     }

     private long key(Histogram.Bucket bucket) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java
index 8e4c503b89bb5..887afdb578fdb 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.aggregations.bucket;

 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.MockScriptPlugin;
@@ -25,7 +24,7 @@
 import java.util.function.Function;

 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -51,10 +50,9 @@ public void setupSuiteScopeCluster() throws Exception { indexRandom( true, - client().prepareIndex("idx").setId("1").setSource("ip", "192.168.1.7", "ips", Arrays.asList("192.168.0.13", "192.168.1.2")), - client().prepareIndex("idx").setId("2").setSource("ip", "192.168.1.10", "ips", Arrays.asList("192.168.1.25", "192.168.1.28")), - client().prepareIndex("idx") - .setId("3") + prepareIndex("idx").setId("1").setSource("ip", "192.168.1.7", "ips", Arrays.asList("192.168.0.13", "192.168.1.2")), + prepareIndex("idx").setId("2").setSource("ip", "192.168.1.10", "ips", Arrays.asList("192.168.1.25", "192.168.1.28")), + prepareIndex("idx").setId("3") .setSource("ip", "2001:db8::ff00:42:8329", "ips", Arrays.asList("2001:db8::ff00:42:8329", "2001:db8::ff00:42:8380")) ); @@ -64,152 +62,167 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSingleValuedField() { - SearchResponse rsp = prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ip") - .addUnboundedTo("192.168.1.0") - .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertNull(bucket1.getFrom()); - assertEquals("192.168.1.0", bucket1.getTo()); - assertEquals("*-192.168.1.0", bucket1.getKey()); - assertEquals(0, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("192.168.1.0", bucket2.getFrom()); - assertEquals("192.168.1.10", bucket2.getTo()); - assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); - assertEquals(1, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("192.168.1.10", bucket3.getFrom()); - assertNull(bucket3.getTo()); - assertEquals("192.168.1.10-*", bucket3.getKey()); - assertEquals(2, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ip") + .addUnboundedTo("192.168.1.0") + .addRange("192.168.1.0", "192.168.1.10") + .addUnboundedFrom("192.168.1.10") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertNull(bucket1.getFrom()); + assertEquals("192.168.1.0", bucket1.getTo()); + assertEquals("*-192.168.1.0", bucket1.getKey()); + assertEquals(0, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("192.168.1.0", bucket2.getFrom()); + assertEquals("192.168.1.10", bucket2.getTo()); + assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); + assertEquals(1, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("192.168.1.10", bucket3.getFrom()); + assertNull(bucket3.getTo()); + assertEquals("192.168.1.10-*", bucket3.getKey()); + assertEquals(2, bucket3.getDocCount()); + } + ); } public void testMultiValuedField() { - SearchResponse rsp = prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ips") - .addUnboundedTo("192.168.1.0") - .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, 
range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertNull(bucket1.getFrom()); - assertEquals("192.168.1.0", bucket1.getTo()); - assertEquals("*-192.168.1.0", bucket1.getKey()); - assertEquals(1, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("192.168.1.0", bucket2.getFrom()); - assertEquals("192.168.1.10", bucket2.getTo()); - assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); - assertEquals(1, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("192.168.1.10", bucket3.getFrom()); - assertNull(bucket3.getTo()); - assertEquals("192.168.1.10-*", bucket3.getKey()); - assertEquals(2, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ips") + .addUnboundedTo("192.168.1.0") + .addRange("192.168.1.0", "192.168.1.10") + .addUnboundedFrom("192.168.1.10") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertNull(bucket1.getFrom()); + assertEquals("192.168.1.0", bucket1.getTo()); + assertEquals("*-192.168.1.0", bucket1.getKey()); + assertEquals(1, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("192.168.1.0", bucket2.getFrom()); + assertEquals("192.168.1.10", bucket2.getTo()); + assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); + assertEquals(1, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("192.168.1.10", bucket3.getFrom()); + assertNull(bucket3.getTo()); + assertEquals("192.168.1.10-*", bucket3.getKey()); + assertEquals(2, bucket3.getDocCount()); + } + ); } public void testIpMask() { - SearchResponse rsp = prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ips") - .addMaskRange("::/0") - .addMaskRange("0.0.0.0/0") - .addMaskRange("2001:db8::/64") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertEquals("::/0", bucket1.getKey()); - assertEquals(3, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("0.0.0.0/0", bucket2.getKey()); - assertEquals(2, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("2001:db8::/64", bucket3.getKey()); - assertEquals(1, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ips") + .addMaskRange("::/0") + .addMaskRange("0.0.0.0/0") + .addMaskRange("2001:db8::/64") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertEquals("::/0", bucket1.getKey()); + assertEquals(3, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("0.0.0.0/0", bucket2.getKey()); + assertEquals(2, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("2001:db8::/64", bucket3.getKey()); + assertEquals(1, bucket3.getDocCount()); + } + ); } public void testPartiallyUnmapped() { - SearchResponse rsp = prepareSearch("idx", "idx_unmapped").addAggregation( - 
AggregationBuilders.ipRange("my_range") - .field("ip") - .addUnboundedTo("192.168.1.0") - .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertNull(bucket1.getFrom()); - assertEquals("192.168.1.0", bucket1.getTo()); - assertEquals("*-192.168.1.0", bucket1.getKey()); - assertEquals(0, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("192.168.1.0", bucket2.getFrom()); - assertEquals("192.168.1.10", bucket2.getTo()); - assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); - assertEquals(1, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("192.168.1.10", bucket3.getFrom()); - assertNull(bucket3.getTo()); - assertEquals("192.168.1.10-*", bucket3.getKey()); - assertEquals(2, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ip") + .addUnboundedTo("192.168.1.0") + .addRange("192.168.1.0", "192.168.1.10") + .addUnboundedFrom("192.168.1.10") + ), + response -> { + Range range = response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertNull(bucket1.getFrom()); + assertEquals("192.168.1.0", bucket1.getTo()); + assertEquals("*-192.168.1.0", bucket1.getKey()); + assertEquals(0, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("192.168.1.0", bucket2.getFrom()); + assertEquals("192.168.1.10", bucket2.getTo()); + assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); + assertEquals(1, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("192.168.1.10", bucket3.getFrom()); + assertNull(bucket3.getTo()); + assertEquals("192.168.1.10-*", bucket3.getKey()); + assertEquals(2, bucket3.getDocCount()); + } + ); } public void testUnmapped() { - SearchResponse rsp = prepareSearch("idx_unmapped").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ip") - .addUnboundedTo("192.168.1.0") - .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10") - ).get(); - assertNoFailures(rsp); - Range range = rsp.getAggregations().get("my_range"); - assertEquals(3, range.getBuckets().size()); - - Range.Bucket bucket1 = range.getBuckets().get(0); - assertNull(bucket1.getFrom()); - assertEquals("192.168.1.0", bucket1.getTo()); - assertEquals("*-192.168.1.0", bucket1.getKey()); - assertEquals(0, bucket1.getDocCount()); - - Range.Bucket bucket2 = range.getBuckets().get(1); - assertEquals("192.168.1.0", bucket2.getFrom()); - assertEquals("192.168.1.10", bucket2.getTo()); - assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); - assertEquals(0, bucket2.getDocCount()); - - Range.Bucket bucket3 = range.getBuckets().get(2); - assertEquals("192.168.1.10", bucket3.getFrom()); - assertNull(bucket3.getTo()); - assertEquals("192.168.1.10-*", bucket3.getKey()); - assertEquals(0, bucket3.getDocCount()); + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped").addAggregation( + AggregationBuilders.ipRange("my_range") + .field("ip") + .addUnboundedTo("192.168.1.0") + .addRange("192.168.1.0", "192.168.1.10") + .addUnboundedFrom("192.168.1.10") + ), + response -> { + Range range = 
response.getAggregations().get("my_range"); + assertEquals(3, range.getBuckets().size()); + + Range.Bucket bucket1 = range.getBuckets().get(0); + assertNull(bucket1.getFrom()); + assertEquals("192.168.1.0", bucket1.getTo()); + assertEquals("*-192.168.1.0", bucket1.getKey()); + assertEquals(0, bucket1.getDocCount()); + + Range.Bucket bucket2 = range.getBuckets().get(1); + assertEquals("192.168.1.0", bucket2.getFrom()); + assertEquals("192.168.1.10", bucket2.getTo()); + assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey()); + assertEquals(0, bucket2.getDocCount()); + + Range.Bucket bucket3 = range.getBuckets().get(2); + assertEquals("192.168.1.10", bucket3.getFrom()); + assertNull(bucket3.getTo()); + assertEquals("192.168.1.10-*", bucket3.getKey()); + assertEquals(0, bucket3.getDocCount()); + } + ); } public void testRejectsScript() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java index d50ea294287eb..7f9cb01599a00 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -22,7 +21,7 @@ import java.util.function.Function; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; public class IpTermsIT extends AbstractTermsTestCase { @@ -55,83 +54,91 @@ public void testScriptValue() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); indexRandom( true, - client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") + prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") ); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['ip'].value", Collections.emptyMap()); - SearchResponse response = prepareSearch("index").addAggregation( - new TermsAggregationBuilder("my_terms").script(script).executionHint(randomExecutionHint()) - ).get(); - assertNoFailures(response); - StringTerms terms = response.getAggregations().get("my_terms"); - assertEquals(2, terms.getBuckets().size()); - - StringTerms.Bucket bucket1 = terms.getBuckets().get(0); - assertEquals(2, bucket1.getDocCount()); - assertEquals("192.168.1.7", bucket1.getKey()); - assertEquals("192.168.1.7", bucket1.getKeyAsString()); - - StringTerms.Bucket bucket2 = terms.getBuckets().get(1); - assertEquals(1, bucket2.getDocCount()); - assertEquals("2001:db8::2:1", bucket2.getKey()); - assertEquals("2001:db8::2:1", bucket2.getKeyAsString()); + assertNoFailuresAndResponse( + prepareSearch("index").addAggregation( + new TermsAggregationBuilder("my_terms").script(script).executionHint(randomExecutionHint()) 
+ ), + response -> { + StringTerms terms = response.getAggregations().get("my_terms"); + assertEquals(2, terms.getBuckets().size()); + + StringTerms.Bucket bucket1 = terms.getBuckets().get(0); + assertEquals(2, bucket1.getDocCount()); + assertEquals("192.168.1.7", bucket1.getKey()); + assertEquals("192.168.1.7", bucket1.getKeyAsString()); + + StringTerms.Bucket bucket2 = terms.getBuckets().get(1); + assertEquals(1, bucket2.getDocCount()); + assertEquals("2001:db8::2:1", bucket2.getKey()); + assertEquals("2001:db8::2:1", bucket2.getKeyAsString()); + } + ); } public void testScriptValues() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); indexRandom( true, - client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") + prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") ); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['ip']", Collections.emptyMap()); - SearchResponse response = prepareSearch("index").addAggregation( - new TermsAggregationBuilder("my_terms").script(script).executionHint(randomExecutionHint()) - ).get(); - assertNoFailures(response); - StringTerms terms = response.getAggregations().get("my_terms"); - assertEquals(2, terms.getBuckets().size()); - - StringTerms.Bucket bucket1 = terms.getBuckets().get(0); - assertEquals(2, bucket1.getDocCount()); - assertEquals("192.168.1.7", bucket1.getKey()); - assertEquals("192.168.1.7", bucket1.getKeyAsString()); - - StringTerms.Bucket bucket2 = terms.getBuckets().get(1); - assertEquals(1, bucket2.getDocCount()); - assertEquals("2001:db8::2:1", bucket2.getKey()); - assertEquals("2001:db8::2:1", bucket2.getKeyAsString()); + assertNoFailuresAndResponse( + prepareSearch("index").addAggregation( + new TermsAggregationBuilder("my_terms").script(script).executionHint(randomExecutionHint()) + ), + response -> { + StringTerms terms = response.getAggregations().get("my_terms"); + assertEquals(2, terms.getBuckets().size()); + + StringTerms.Bucket bucket1 = terms.getBuckets().get(0); + assertEquals(2, bucket1.getDocCount()); + assertEquals("192.168.1.7", bucket1.getKey()); + assertEquals("192.168.1.7", bucket1.getKeyAsString()); + + StringTerms.Bucket bucket2 = terms.getBuckets().get(1); + assertEquals(1, bucket2.getDocCount()); + assertEquals("2001:db8::2:1", bucket2.getKey()); + assertEquals("2001:db8::2:1", bucket2.getKeyAsString()); + } + ); } public void testMissingValue() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); indexRandom( true, - client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("3").setSource("ip", "127.0.0.1"), - client().prepareIndex("index").setId("4").setSource("not_ip", "something") + prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), + prepareIndex("index").setId("3").setSource("ip", "127.0.0.1"), + prepareIndex("index").setId("4").setSource("not_ip", "something") + ); + assertNoFailuresAndResponse( + prepareSearch("index").addAggregation( + new 
TermsAggregationBuilder("my_terms").field("ip").missing("127.0.0.1").executionHint(randomExecutionHint()) + ), + response -> { + StringTerms terms = response.getAggregations().get("my_terms"); + assertEquals(2, terms.getBuckets().size()); + + StringTerms.Bucket bucket1 = terms.getBuckets().get(0); + assertEquals(2, bucket1.getDocCount()); + assertEquals("127.0.0.1", bucket1.getKey()); + assertEquals("127.0.0.1", bucket1.getKeyAsString()); + + StringTerms.Bucket bucket2 = terms.getBuckets().get(1); + assertEquals(2, bucket2.getDocCount()); + assertEquals("192.168.1.7", bucket2.getKey()); + assertEquals("192.168.1.7", bucket2.getKeyAsString()); + } ); - SearchResponse response = prepareSearch("index").addAggregation( - new TermsAggregationBuilder("my_terms").field("ip").missing("127.0.0.1").executionHint(randomExecutionHint()) - ).get(); - - assertNoFailures(response); - StringTerms terms = response.getAggregations().get("my_terms"); - assertEquals(2, terms.getBuckets().size()); - - StringTerms.Bucket bucket1 = terms.getBuckets().get(0); - assertEquals(2, bucket1.getDocCount()); - assertEquals("127.0.0.1", bucket1.getKey()); - assertEquals("127.0.0.1", bucket1.getKeyAsString()); - - StringTerms.Bucket bucket2 = terms.getBuckets().get(1); - assertEquals(2, bucket2.getDocCount()); - assertEquals("192.168.1.7", bucket2.getKey()); - assertEquals("192.168.1.7", bucket2.getKeyAsString()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 6c3d1c44aafed..f0c5cbf9c76bb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -54,6 +53,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -102,32 +102,30 @@ public void setupSuiteScopeCluster() throws Exception { createIndex("idx", "high_card_idx"); IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[NUM_DOCS]; for (int i = 0; i < lowCardBuilders.length; i++) { - lowCardBuilders[i] = client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, i) - .startArray(MULTI_VALUED_FIELD_NAME) - .value(i) - .value(i + 1) - .endArray() - .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 
1 : 0) // used to test order by single-bucket sub agg - .field("constant", 1) - .endObject() - ); + lowCardBuilders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i) + .value(i + 1) + .endArray() + .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg + .field("constant", 1) + .endObject() + ); } indexRandom(true, lowCardBuilders); IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO randomize the size? for (int i = 0; i < highCardBuilders.length; i++) { - highCardBuilders[i] = client().prepareIndex("high_card_idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, i) - .startArray(MULTI_VALUED_FIELD_NAME) - .value(i) - .value(i + 1) - .endArray() - .endObject() - ); + highCardBuilders[i] = prepareIndex("high_card_idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i) + .value(i + 1) + .endArray() + .endObject() + ); } indexRandom(true, highCardBuilders); @@ -137,8 +135,7 @@ public void setupSuiteScopeCluster() throws Exception { List<IndexRequestBuilder> builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) ); } @@ -197,45 +194,55 @@ private void getMultiSortDocs(List<IndexRequestBuilder> builders) throws IOExcep createIndex("sort_idx"); for (int i = 1; i <= 3; i++) { builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject() + ) ); } builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 
3).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject() + ) ); } @@ -263,108 +270,118 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception { private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms - SearchResponse allResponse = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(field).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(allResponse); - LongTerms terms = allResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - int expectedCardinality = terms.getBuckets().size(); + int[] expectedCardinality = new int[1]; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(field).collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + expectedCardinality[0] = terms.getBuckets().size(); + } + ); // Gather terms using partitioned aggregations final int numPartitions = randomIntBetween(2, 4); Set<Number> foundTerms = new HashSet<>(); for (int partition = 0; partition < numPartitions; partition++) { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(field) - .includeExclude(new IncludeExclude(partition, numPartitions)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(response); - terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - for (LongTerms.Bucket bucket : terms.getBuckets()) { - assertFalse(foundTerms.contains(bucket.getKeyAsNumber())); - foundTerms.add(bucket.getKeyAsNumber()); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(field) + .includeExclude(new IncludeExclude(partition, numPartitions)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, 
notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + for (LongTerms.Bucket bucket : terms.getBuckets()) { + assertFalse(foundTerms.contains(bucket.getKeyAsNumber())); + foundTerms.add(bucket.getKeyAsNumber()); + } + } + ); } - assertEquals(expectedCardinality, foundTerms.size()); + assertEquals(expectedCardinality[0], foundTerms.size()); } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - // Scripts force the results to doubles - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d)); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d))); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ), + response -> { + // Scripts force the results to doubles + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i + 1d)); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + (i + 1d))); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value - 1", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - // Scripts force the results to doubles - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i - 1d)); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + (i - 1d))); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i - 1)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, 
CustomScriptPlugin.NAME, "_value - 1", Collections.emptyMap())) + ), + response -> { + // Scripts force the results to doubles + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + DoubleTerms.Bucket bucket = terms.getBucketByKey("" + (i - 1d)); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + (i - 1d))); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i - 1)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / 1000 + 1)", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - // The script always converts long to double - DoubleTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(1)); - - DoubleTerms.Bucket bucket = terms.getBucketByKey("1.0"); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("1.0")); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1)); - assertThat(bucket.getDocCount(), equalTo(5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / 1000 + 1)", Collections.emptyMap())) + ), + response -> { + // The script always converts long to double + DoubleTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(1)); + + DoubleTerms.Bucket bucket = terms.getBucketByKey("1.0"); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("1.0")); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1)); + assertThat(bucket.getDocCount(), equalTo(5L)); + } + ); } /* @@ -392,27 +410,28 @@ public void testScriptSingleValue() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .userValueTypeHint(ValueType.LONG) - .script(script) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - assertThat(bucket.getKeyAsNumber(), instanceOf(Long.class)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + 
prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .userValueTypeHint(ValueType.LONG) + .script(script) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getKeyAsNumber(), instanceOf(Long.class)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testScriptMultiValued() throws Exception { @@ -423,207 +442,213 @@ public void testScriptMultiValued() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .userValueTypeHint(ValueType.LONG) - .script(script) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .userValueTypeHint(ValueType.LONG) + .script(script) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped", "idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped", "idx").addAggregation( + new 
TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testPartiallyUnmappedWithFormat() throws Exception { - SearchResponse response = prepareSearch("idx_unmapped", "idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .format("0000") - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - String key = Strings.format("%04d", i); - LongTerms.Bucket bucket = terms.getBucketByKey(key); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(key)); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx_unmapped", "idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .format("0000") + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + String key = Strings.format("%04d", i); + LongTerms.Bucket bucket = terms.getBucketByKey(key); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(key)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithTermsSubAgg() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("avg_i", asc)) - .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation( - new TermsAggregationBuilder("subTerms").field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Avg avg = bucket.getAggregations().get("avg_i"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo((double) i)); - - LongTerms 
subTermsAgg = bucket.getAggregations().get("subTerms"); - assertThat(subTermsAgg, notNullValue()); - assertThat(subTermsAgg.getBuckets().size(), equalTo(2)); - int j = i; - for (LongTerms.Bucket subBucket : subTermsAgg.getBuckets()) { - assertThat(subBucket, notNullValue()); - assertThat(subBucket.getKeyAsString(), equalTo(String.valueOf(j))); - assertThat(subBucket.getDocCount(), equalTo(1L)); - j++; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("avg_i", asc)) + .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation( + new TermsAggregationBuilder("subTerms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Avg avg = bucket.getAggregations().get("avg_i"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo((double) i)); + + LongTerms subTermsAgg = bucket.getAggregations().get("subTerms"); + assertThat(subTermsAgg, notNullValue()); + assertThat(subTermsAgg.getBuckets().size(), equalTo(2)); + int j = i; + for (LongTerms.Bucket subBucket : subTermsAgg.getBuckets()) { + assertThat(subBucket, notNullValue()); + assertThat(subBucket.getKeyAsString(), equalTo(String.valueOf(j))); + assertThat(subBucket.getDocCount(), equalTo(1L)); + j++; + } + } } - } + ); } public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("num_tags").field("num_tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter", asc)) - .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) - ).get(); - - assertNoFailures(response); - - LongTerms tags = response.getAggregations().get("num_tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("num_tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - LongTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "0" : "1")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - Filter filter = tag.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "1" : "0")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - filter = tag.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(asc ? 
3L : 2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("num_tags").field("num_tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter", asc)) + .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) + ), + response -> { + LongTerms tags = response.getAggregations().get("num_tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("num_tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + LongTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "0" : "1")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + Filter filter = tag.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "1" : "0")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + filter = tag.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").field("num_tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>filter2>max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)) + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").field("num_tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>filter2>max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)) + ) ) - ) - ).get(); - - assertNoFailures(response); - - LongTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - // the max for "1" is 2 - // the max for "0" is 4 - - LongTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "1" : "0")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter2 = filter1.getAggregations().get("filter2"); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); - Max max = filter2.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.value(), equalTo(asc ? 2.0 : 4.0)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "0" : "1")); - assertThat(tag.getDocCount(), equalTo(asc ? 
2L : 3L)); - filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); - filter2 = filter1.getAggregations().get("filter2"); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); - max = filter2.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.value(), equalTo(asc ? 4.0 : 2.0)); + ), + response -> { + LongTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + // the max for "1" is 2 + // the max for "0" is 4 + + LongTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "1" : "0")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter2 = filter1.getAggregations().get("filter2"); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); + Max max = filter2.getAggregations().get("max"); + assertThat(max, notNullValue()); + assertThat(max.value(), equalTo(asc ? 2.0 : 4.0)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "0" : "1")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); + filter2 = filter1.getAggregations().get("filter2"); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); + max = filter2.getAggregations().get("max"); + assertThat(max, notNullValue()); + assertThat(max.value(), equalTo(asc ? 
4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Exception { @@ -707,89 +732,89 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 4; i >= 0; i--) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + 
.subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 4; i >= 0; i--) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.variance", asc)) - .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - LongTerms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.variance", asc)) + .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + LongTerms.Bucket bucket = terms.getBucketByKey("" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + } + } + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { @@ -833,34 +858,35 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... 
order) { - SearchResponse response = prepareSearch("sort_idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d")) - ).get(); - - assertNoFailures(response); - - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); - - int i = 0; - for (LongTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(expectedKeys[i]))); - assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); - Avg avg = bucket.getAggregations().get("avg_l"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); - Sum sum = bucket.getAggregations().get("sum_d"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("sort_idx").addAggregation( + new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ), + response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (LongTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(expectedKeys[i]))); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + ); } public void testOtherDocCount() { @@ -878,8 +904,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -893,13 +919,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -911,13 +937,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -929,8 +955,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index 36ba2a988668a..4e93bf578cc87 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -11,8 +11,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilder; @@ -49,6 +47,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @ESIntegTestCase.SuiteScopeTestCase @@ -114,16 +113,15 @@ public void setupSuiteScopeCluster() throws Exception { final int frequency = randomBoolean() ? 
1 : randomIntBetween(2, 20); for (int j = 0; j < frequency; ++j) { indexRequests.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field("s", stringTerm) - .field("l", longTerm) - .field("d", doubleTerm) - .field("date", dateTerm) - .field("match", randomBoolean()) - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("s", stringTerm) + .field("l", longTerm) + .field("d", doubleTerm) + .field("date", dateTerm) + .field("match", randomBoolean()) + .endObject() + ) ); } } @@ -306,41 +304,47 @@ private void testMinDocCountOnTerms(String field, Script script, BucketOrder ord private void testMinDocCountOnTerms(String field, Script script, BucketOrder order, String include, boolean retry) throws Exception { // all terms - final SearchResponse allTermsResponse = prepareSearch("idx").setSize(0) - .setQuery(QUERY) - .addAggregation( - script.apply(terms("terms"), field) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .order(order) - .size(cardinality + randomInt(10)) - .minDocCount(0) - ) - .get(); - assertAllSuccessful(allTermsResponse); - - final Terms allTerms = allTermsResponse.getAggregations().get("terms"); - assertEquals(cardinality, allTerms.getBuckets().size()); - - for (long minDocCount = 0; minDocCount < 20; ++minDocCount) { - final int size = randomIntBetween(1, cardinality + 2); - final SearchRequest request = prepareSearch("idx").setSize(0) + assertResponse( + prepareSearch("idx").setSize(0) .setQuery(QUERY) .addAggregation( script.apply(terms("terms"), field) .collectMode(randomFrom(SubAggCollectionMode.values())) .executionHint(randomExecutionHint()) .order(order) - .size(size) - .includeExclude(include == null ? null : new IncludeExclude(include, null, null, null)) - .shardSize(cardinality + randomInt(10)) - .minDocCount(minDocCount) - ) - .request(); - final SearchResponse response = client().search(request).get(); - assertAllSuccessful(response); - assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include); - } + .size(cardinality + randomInt(10)) + .minDocCount(0) + ), + allTermsResponse -> { + assertAllSuccessful(allTermsResponse); + + final Terms allTerms = allTermsResponse.getAggregations().get("terms"); + assertEquals(cardinality, allTerms.getBuckets().size()); + + for (long minDocCount = 0; minDocCount < 20; ++minDocCount) { + final int size = randomIntBetween(1, cardinality + 2); + final long finalMinDocCount = minDocCount; + assertResponse( + prepareSearch("idx").setSize(0) + .setQuery(QUERY) + .addAggregation( + script.apply(terms("terms"), field) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .order(order) + .size(size) + .includeExclude(include == null ? 
null : new IncludeExclude(include, null, null, null)) + .shardSize(cardinality + randomInt(10)) + .minDocCount(minDocCount) + ), + response -> { + assertAllSuccessful(response); + assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), finalMinDocCount, size, include); + } + ); + } + } + ); } public void testHistogramCountAsc() throws Exception { @@ -377,38 +381,52 @@ public void testDateHistogramKeyDesc() throws Exception { private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { final int interval = randomIntBetween(1, 3); - final SearchResponse allResponse = prepareSearch("idx").setSize(0) - .setQuery(QUERY) - .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0)) - .get(); - - final Histogram allHisto = allResponse.getAggregations().get("histo"); - - for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { - final SearchResponse response = prepareSearch("idx").setSize(0) + assertResponse( + prepareSearch("idx").setSize(0) .setQuery(QUERY) - .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount)) - .get(); - assertSubset(allHisto, (Histogram) response.getAggregations().get("histo"), minDocCount); - } + .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0)), + allResponse -> { + final Histogram allHisto = allResponse.getAggregations().get("histo"); + for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { + final long finalMinDocCount = minDocCount; + assertResponse( + prepareSearch("idx").setSize(0) + .setQuery(QUERY) + .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount)), + response -> { + assertSubset(allHisto, response.getAggregations().get("histo"), finalMinDocCount); + } + ); + } + } + ); } private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception { - final SearchResponse allResponse = prepareSearch("idx").setSize(0) - .setQuery(QUERY) - .addAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).order(order).minDocCount(0)) - .get(); - - final Histogram allHisto = allResponse.getAggregations().get("histo"); - - for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { - final SearchResponse response = prepareSearch("idx").setSize(0) + assertResponse( + prepareSearch("idx").setSize(0) .setQuery(QUERY) - .addAggregation( - dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).order(order).minDocCount(minDocCount) - ) - .get(); - assertSubset(allHisto, response.getAggregations().get("histo"), minDocCount); - } + .addAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).order(order).minDocCount(0)), + allResponse -> { + final Histogram allHisto = allResponse.getAggregations().get("histo"); + + for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { + final long finalMinDocCount = minDocCount; + assertResponse( + prepareSearch("idx").setSize(0) + .setQuery(QUERY) + .addAggregation( + dateHistogram("histo").field("date") + .fixedInterval(DateHistogramInterval.DAY) + .order(order) + .minDocCount(minDocCount) + ), + response -> { + assertSubset(allHisto, response.getAggregations().get("histo"), finalMinDocCount); + } + ); + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java index eb2ad6de7789e..e6d0b6a1f9f1c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.util.Comparators; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -28,7 +27,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.core.IsNull.notNullValue; @@ -114,7 +113,7 @@ public void setupSuiteScopeCluster() throws Exception { if (randomBoolean()) { source.field("numeric_value", randomDouble()); } - client().prepareIndex("idx").setSource(source.endObject()).get(); + prepareIndex("idx").setSource(source.endObject()).get(); } refresh(); ensureSearchable(); @@ -145,16 +144,18 @@ private void assertCorrectlySorted(Histogram histo, boolean asc, SubAggregation public void testTerms(String fieldName) { final boolean asc = randomBoolean(); SubAggregation agg = randomFrom(SubAggregation.values()); - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").field(fieldName) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(agg.builder()) - .order(BucketOrder.aggregation(agg.sortKey(), asc)) - ).get(); - - assertNoFailures(response); - final Terms terms = response.getAggregations().get("terms"); - assertCorrectlySorted(terms, asc, agg); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").field(fieldName) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(agg.builder()) + .order(BucketOrder.aggregation(agg.sortKey(), asc)) + ), + response -> { + final Terms terms = response.getAggregations().get("terms"); + assertCorrectlySorted(terms, asc, agg); + } + ); } public void testStringTerms() { @@ -172,16 +173,17 @@ public void testDoubleTerms() { public void testLongHistogram() { final boolean asc = randomBoolean(); SubAggregation agg = randomFrom(SubAggregation.values()); - SearchResponse response = prepareSearch("idx").addAggregation( - histogram("histo").field("long_value") - .interval(randomIntBetween(1, 2)) - .subAggregation(agg.builder()) - .order(BucketOrder.aggregation(agg.sortKey(), asc)) - ).get(); - - assertNoFailures(response); - final Histogram histo = response.getAggregations().get("histo"); - assertCorrectlySorted(histo, asc, agg); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field("long_value") + .interval(randomIntBetween(1, 2)) + .subAggregation(agg.builder()) + .order(BucketOrder.aggregation(agg.sortKey(), asc)) + ), + response -> { + final Histogram histo = response.getAggregations().get("histo"); + assertCorrectlySorted(histo, asc, agg); + } + ); } - } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 2ab107c2580c7..9a27b0d8f75a3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -47,7 +46,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -92,15 +91,14 @@ public void setupSuiteScopeCluster() throws Exception { source = source.startObject().field("value", i + 1 + j).endObject(); } source = source.endArray().endObject(); - builders.add(client().prepareIndex("idx").setId("" + i + 1).setSource(source)); + builders.add(prepareIndex("idx").setId("" + i + 1).setSource(source)); } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer", "nested", "type=nested").get(); ensureGreen("empty_bucket_idx"); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource( jsonBuilder().startObject() .field("value", i * 2) @@ -147,8 +145,7 @@ public void setupSuiteScopeCluster() throws Exception { ensureGreen("idx_nested_nested_aggs"); builders.add( - client().prepareIndex("idx_nested_nested_aggs") - .setId("1") + prepareIndex("idx_nested_nested_aggs").setId("1") .setSource( jsonBuilder().startObject() .startArray("nested1") @@ -177,176 +174,186 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSimple() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - nested("nested", "nested").subAggregation(stats("nested_value_stats").field("nested.value")) - ).get(); - - assertNoFailures(response); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + nested("nested", "nested").subAggregation(stats("nested_value_stats").field("nested.value")) + ), + response -> { + double min = Double.POSITIVE_INFINITY; + double max = Double.NEGATIVE_INFINITY; + long sum = 0; + long count = 0; + for (int i = 0; i < numParents; ++i) { + for (int j = 0; j < numChildren[i]; ++j) { + final long value = i + 1 + j; + min = Math.min(min, value); + max = Math.max(max, value); + sum += value; + ++count; + } + } - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - long sum = 0; - long count = 0; - for (int i = 0; i < numParents; ++i) { - for (int j = 0; j < numChildren[i]; ++j) { - final long value = 
i + 1 + j; - min = Math.min(min, value); - max = Math.max(max, value); - sum += value; - ++count; + Nested nested = response.getAggregations().get("nested"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("nested")); + assertThat(nested.getDocCount(), equalTo(count)); + assertThat(nested.getAggregations().asList().isEmpty(), is(false)); + + Stats stats = nested.getAggregations().get("nested_value_stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMin(), equalTo(min)); + assertThat(stats.getMax(), equalTo(max)); + assertThat(stats.getCount(), equalTo(count)); + assertThat(stats.getSum(), equalTo((double) sum)); + assertThat(stats.getAvg(), equalTo((double) sum / count)); } - } - - Nested nested = response.getAggregations().get("nested"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested")); - assertThat(nested.getDocCount(), equalTo(count)); - assertThat(nested.getAggregations().asList().isEmpty(), is(false)); - - Stats stats = nested.getAggregations().get("nested_value_stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMin(), equalTo(min)); - assertThat(stats.getMax(), equalTo(max)); - assertThat(stats.getCount(), equalTo(count)); - assertThat(stats.getSum(), equalTo((double) sum)); - assertThat(stats.getAvg(), equalTo((double) sum / count)); + ); } public void testNonExistingNestedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").addAggregation( - nested("nested", "value").subAggregation(stats("nested_value_stats").field("nested.value")) - ).get(); - - Nested nested = searchResponse.getAggregations().get("nested"); - assertThat(nested, Matchers.notNullValue()); - assertThat(nested.getName(), equalTo("nested")); - assertThat(nested.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + nested("nested", "value").subAggregation(stats("nested_value_stats").field("nested.value")) + ), + response -> { + Nested nested = response.getAggregations().get("nested"); + assertThat(nested, Matchers.notNullValue()); + assertThat(nested.getName(), equalTo("nested")); + assertThat(nested.getDocCount(), is(0L)); + } + ); } public void testNestedWithSubTermsAgg() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - nested("nested", "nested").subAggregation(terms("values").field("nested.value").size(100).collectMode(aggCollectionMode)) - ).get(); - - assertNoFailures(response); - - long docCount = 0; - long[] counts = new long[numParents + 6]; - for (int i = 0; i < numParents; ++i) { - for (int j = 0; j < numChildren[i]; ++j) { - final int value = i + 1 + j; - ++counts[value]; - ++docCount; - } - } - int uniqueValues = 0; - for (long count : counts) { - if (count > 0) { - ++uniqueValues; - } - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + nested("nested", "nested").subAggregation(terms("values").field("nested.value").size(100).collectMode(aggCollectionMode)) + ), + response -> { + long docCount = 0; + long[] counts = new long[numParents + 6]; + for (int i = 0; i < numParents; ++i) { + for (int j = 0; j < numChildren[i]; ++j) { + final int value = i + 1 + j; + ++counts[value]; + ++docCount; + } + } + int uniqueValues = 0; + for (long count : counts) { + if (count > 0) { + ++uniqueValues; + } + } - Nested nested = response.getAggregations().get("nested"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested")); - assertThat(nested.getDocCount(), 
equalTo(docCount)); - assertThat(((InternalAggregation) nested).getProperty("_count"), equalTo(docCount)); - assertThat(nested.getAggregations().asList().isEmpty(), is(false)); - - LongTerms values = nested.getAggregations().get("values"); - assertThat(values, notNullValue()); - assertThat(values.getName(), equalTo("values")); - assertThat(values.getBuckets(), notNullValue()); - assertThat(values.getBuckets().size(), equalTo(uniqueValues)); - for (int i = 0; i < counts.length; ++i) { - final String key = Long.toString(i); - if (counts[i] == 0) { - assertNull(values.getBucketByKey(key)); - } else { - Bucket bucket = values.getBucketByKey(key); - assertNotNull(bucket); - assertEquals(counts[i], bucket.getDocCount()); + Nested nested = response.getAggregations().get("nested"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("nested")); + assertThat(nested.getDocCount(), equalTo(docCount)); + assertThat(((InternalAggregation) nested).getProperty("_count"), equalTo(docCount)); + assertThat(nested.getAggregations().asList().isEmpty(), is(false)); + + LongTerms values = nested.getAggregations().get("values"); + assertThat(values, notNullValue()); + assertThat(values.getName(), equalTo("values")); + assertThat(values.getBuckets(), notNullValue()); + assertThat(values.getBuckets().size(), equalTo(uniqueValues)); + for (int i = 0; i < counts.length; ++i) { + final String key = Long.toString(i); + if (counts[i] == 0) { + assertNull(values.getBucketByKey(key)); + } else { + Bucket bucket = values.getBucketByKey(key); + assertNotNull(bucket); + assertEquals(counts[i], bucket.getDocCount()); + } + } + assertThat(((InternalAggregation) nested).getProperty("values"), sameInstance(values)); } - } - assertThat(((InternalAggregation) nested).getProperty("values"), sameInstance(values)); + ); } public void testNestedAsSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("top_values").field("value") - .size(100) - .collectMode(aggCollectionMode) - .subAggregation(nested("nested", "nested").subAggregation(max("max_value").field("nested.value"))) - ).get(); - - assertNoFailures(response); - - LongTerms values = response.getAggregations().get("top_values"); - assertThat(values, notNullValue()); - assertThat(values.getName(), equalTo("top_values")); - assertThat(values.getBuckets(), notNullValue()); - assertThat(values.getBuckets().size(), equalTo(numParents)); - - for (int i = 0; i < numParents; i++) { - String topValue = "" + (i + 1); - assertThat(values.getBucketByKey(topValue), notNullValue()); - Nested nested = values.getBucketByKey(topValue).getAggregations().get("nested"); - assertThat(nested, notNullValue()); - Max max = nested.getAggregations().get("max_value"); - assertThat(max, notNullValue()); - assertThat(max.value(), equalTo(numChildren[i] == 0 ? 
Double.NEGATIVE_INFINITY : (double) i + numChildren[i])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("top_values").field("value") + .size(100) + .collectMode(aggCollectionMode) + .subAggregation(nested("nested", "nested").subAggregation(max("max_value").field("nested.value"))) + ), + response -> { + LongTerms values = response.getAggregations().get("top_values"); + assertThat(values, notNullValue()); + assertThat(values.getName(), equalTo("top_values")); + assertThat(values.getBuckets(), notNullValue()); + assertThat(values.getBuckets().size(), equalTo(numParents)); + + for (int i = 0; i < numParents; i++) { + String topValue = "" + (i + 1); + assertThat(values.getBucketByKey(topValue), notNullValue()); + Nested nested = values.getBucketByKey(topValue).getAggregations().get("nested"); + assertThat(nested, notNullValue()); + Max max = nested.getAggregations().get("max_value"); + assertThat(max, notNullValue()); + assertThat(max.value(), equalTo(numChildren[i] == 0 ? Double.NEGATIVE_INFINITY : (double) i + numChildren[i])); + } + } + ); } public void testNestNestedAggs() throws Exception { - SearchResponse response = prepareSearch("idx_nested_nested_aggs").addAggregation( - nested("level1", "nested1").subAggregation( - terms("a").field("nested1.a.keyword") - .collectMode(aggCollectionMode) - .subAggregation(nested("level2", "nested1.nested2").subAggregation(sum("sum").field("nested1.nested2.b"))) - ) - ).get(); - assertNoFailures(response); - - Nested level1 = response.getAggregations().get("level1"); - assertThat(level1, notNullValue()); - assertThat(level1.getName(), equalTo("level1")); - assertThat(level1.getDocCount(), equalTo(2L)); - - StringTerms a = level1.getAggregations().get("a"); - Terms.Bucket bBucket = a.getBucketByKey("a"); - assertThat(bBucket.getDocCount(), equalTo(1L)); - - Nested level2 = bBucket.getAggregations().get("level2"); - assertThat(level2.getDocCount(), equalTo(1L)); - Sum sum = level2.getAggregations().get("sum"); - assertThat(sum.value(), equalTo(2d)); - - a = level1.getAggregations().get("a"); - bBucket = a.getBucketByKey("b"); - assertThat(bBucket.getDocCount(), equalTo(1L)); - - level2 = bBucket.getAggregations().get("level2"); - assertThat(level2.getDocCount(), equalTo(1L)); - sum = level2.getAggregations().get("sum"); - assertThat(sum.value(), equalTo(2d)); + assertNoFailuresAndResponse( + prepareSearch("idx_nested_nested_aggs").addAggregation( + nested("level1", "nested1").subAggregation( + terms("a").field("nested1.a.keyword") + .collectMode(aggCollectionMode) + .subAggregation(nested("level2", "nested1.nested2").subAggregation(sum("sum").field("nested1.nested2.b"))) + ) + ), + response -> { + Nested level1 = response.getAggregations().get("level1"); + assertThat(level1, notNullValue()); + assertThat(level1.getName(), equalTo("level1")); + assertThat(level1.getDocCount(), equalTo(2L)); + + StringTerms a = level1.getAggregations().get("a"); + Terms.Bucket bBucket = a.getBucketByKey("a"); + assertThat(bBucket.getDocCount(), equalTo(1L)); + + Nested level2 = bBucket.getAggregations().get("level2"); + assertThat(level2.getDocCount(), equalTo(1L)); + Sum sum = level2.getAggregations().get("sum"); + assertThat(sum.value(), equalTo(2d)); + + a = level1.getAggregations().get("a"); + bBucket = a.getBucketByKey("b"); + assertThat(bBucket.getDocCount(), equalTo(1L)); + + level2 = bBucket.getAggregations().get("level2"); + assertThat(level2.getDocCount(), equalTo(1L)); + sum = level2.getAggregations().get("sum"); + 
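
Every hunk in this file follows the same refactor: instead of materializing a SearchResponse with .get() and asserting on the local afterwards, the request builder and the assertions are handed to ElasticsearchAssertions.assertResponse / assertNoFailuresAndResponse, which run the assertions as a callback. A minimal sketch of what such a helper plausibly looks like, assuming a simplified non-generic signature (the real helpers in org.elasticsearch.test.hamcrest.ElasticsearchAssertions are generic over the request builder type):

    import java.util.function.Consumer;
    import org.elasticsearch.action.search.SearchRequestBuilder;
    import org.elasticsearch.action.search.SearchResponse;

    // Sketch only: execute the request, fail fast on shard failures, hand the
    // live response to the caller's assertions, and always release it afterwards.
    static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
        SearchResponse response = request.get();
        try {
            assertNoFailures(response);   // shard-failure check, as in the old two-step pattern
            consumer.accept(response);    // caller's assertions see a live response
        } finally {
            response.decRef();            // released exactly once, even if an assertion throws
        }
    }

The callback shape means the response can no longer escape the helper unreleased, which is also why these hunks drop the now-unused SearchResponse imports.
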
assertThat(sum.value(), equalTo(2d)); + } + ); } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(nested("nested", "nested"))) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, Matchers.notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, Matchers.notNullValue()); - - Nested nested = bucket.getAggregations().get("nested"); - assertThat(nested, Matchers.notNullValue()); - assertThat(nested.getName(), equalTo("nested")); - assertThat(nested.getDocCount(), is(0L)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(nested("nested", "nested"))), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, Matchers.notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, Matchers.notNullValue()); + + Nested nested = bucket.getAggregations().get("nested"); + assertThat(nested, Matchers.notNullValue()); + assertThat(nested.getName(), equalTo("nested")); + assertThat(nested.getDocCount(), is(0L)); + } + ); } // TODO previously we would detect if you tried to do a nested agg on a non-nested object field, @@ -422,7 +429,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { ensureGreen("idx2"); List indexRequests = new ArrayList<>(2); - indexRequests.add(client().prepareIndex("idx2").setId("1").setSource(""" + indexRequests.add(prepareIndex("idx2").setId("1").setSource(""" { "dates": { "month": { @@ -444,7 +451,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { } ] }""", XContentType.JSON)); - indexRequests.add(client().prepareIndex("idx2").setId("2").setSource(""" + indexRequests.add(prepareIndex("idx2").setId("2").setSource(""" { "dates": { "month": { @@ -468,60 +475,65 @@ public void testParentFilterResolvedCorrectly() throws Exception { }""", XContentType.JSON)); indexRandom(true, indexRequests); - SearchResponse response = prepareSearch("idx2").addAggregation( - terms("startDate").field("dates.month.start") - .subAggregation( - terms("endDate").field("dates.month.end") - .subAggregation( - terms("period").field("dates.month.label") - .subAggregation( - nested("ctxt_idfier_nested", "comments").subAggregation( - filter("comment_filter", termQuery("comments.identifier", "29111")).subAggregation( - nested("nested_tags", "comments.tags").subAggregation(terms("tag").field("comments.tags.name")) + assertNoFailuresAndResponse( + prepareSearch("idx2").addAggregation( + terms("startDate").field("dates.month.start") + .subAggregation( + terms("endDate").field("dates.month.end") + .subAggregation( + terms("period").field("dates.month.label") + .subAggregation( + nested("ctxt_idfier_nested", "comments").subAggregation( + filter("comment_filter", termQuery("comments.identifier", "29111")).subAggregation( + nested("nested_tags", "comments.tags").subAggregation( + terms("tag").field("comments.tags.name") + ) + ) ) ) - ) - ) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 2); - - Terms startDate = 
response.getAggregations().get("startDate"); - assertThat(startDate.getBuckets().size(), equalTo(2)); - Terms.Bucket bucket = startDate.getBucketByKey("2014-11-01T00:00:00.000Z"); - assertThat(bucket.getDocCount(), equalTo(1L)); - Terms endDate = bucket.getAggregations().get("endDate"); - bucket = endDate.getBucketByKey("2014-11-30T00:00:00.000Z"); - assertThat(bucket.getDocCount(), equalTo(1L)); - Terms period = bucket.getAggregations().get("period"); - bucket = period.getBucketByKey("2014-11"); - assertThat(bucket.getDocCount(), equalTo(1L)); - Nested comments = bucket.getAggregations().get("ctxt_idfier_nested"); - assertThat(comments.getDocCount(), equalTo(2L)); - Filter filter = comments.getAggregations().get("comment_filter"); - assertThat(filter.getDocCount(), equalTo(1L)); - Nested nestedTags = filter.getAggregations().get("nested_tags"); - assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 - Terms tags = nestedTags.getAggregations().get("tag"); - assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty - - bucket = startDate.getBucketByKey("2014-12-01T00:00:00.000Z"); - assertThat(bucket.getDocCount(), equalTo(1L)); - endDate = bucket.getAggregations().get("endDate"); - bucket = endDate.getBucketByKey("2014-12-31T00:00:00.000Z"); - assertThat(bucket.getDocCount(), equalTo(1L)); - period = bucket.getAggregations().get("period"); - bucket = period.getBucketByKey("2014-12"); - assertThat(bucket.getDocCount(), equalTo(1L)); - comments = bucket.getAggregations().get("ctxt_idfier_nested"); - assertThat(comments.getDocCount(), equalTo(2L)); - filter = comments.getAggregations().get("comment_filter"); - assertThat(filter.getDocCount(), equalTo(1L)); - nestedTags = filter.getAggregations().get("nested_tags"); - assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 - tags = nestedTags.getAggregations().get("tag"); - assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty + ) + ) + ), + response -> { + assertHitCount(response, 2); + + Terms startDate = response.getAggregations().get("startDate"); + assertThat(startDate.getBuckets().size(), equalTo(2)); + Terms.Bucket bucket = startDate.getBucketByKey("2014-11-01T00:00:00.000Z"); + assertThat(bucket.getDocCount(), equalTo(1L)); + Terms endDate = bucket.getAggregations().get("endDate"); + bucket = endDate.getBucketByKey("2014-11-30T00:00:00.000Z"); + assertThat(bucket.getDocCount(), equalTo(1L)); + Terms period = bucket.getAggregations().get("period"); + bucket = period.getBucketByKey("2014-11"); + assertThat(bucket.getDocCount(), equalTo(1L)); + Nested comments = bucket.getAggregations().get("ctxt_idfier_nested"); + assertThat(comments.getDocCount(), equalTo(2L)); + Filter filter = comments.getAggregations().get("comment_filter"); + assertThat(filter.getDocCount(), equalTo(1L)); + Nested nestedTags = filter.getAggregations().get("nested_tags"); + assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 + Terms tags = nestedTags.getAggregations().get("tag"); + assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty + + bucket = startDate.getBucketByKey("2014-12-01T00:00:00.000Z"); + assertThat(bucket.getDocCount(), equalTo(1L)); + endDate = bucket.getAggregations().get("endDate"); + bucket = endDate.getBucketByKey("2014-12-31T00:00:00.000Z"); + assertThat(bucket.getDocCount(), equalTo(1L)); + period = bucket.getAggregations().get("period"); + bucket = period.getBucketByKey("2014-12"); + assertThat(bucket.getDocCount(), equalTo(1L)); + 
comments = bucket.getAggregations().get("ctxt_idfier_nested"); + assertThat(comments.getDocCount(), equalTo(2L)); + filter = comments.getAggregations().get("comment_filter"); + assertThat(filter.getDocCount(), equalTo(1L)); + nestedTags = filter.getAggregations().get("nested_tags"); + assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 + tags = nestedTags.getAggregations().get("tag"); + assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty + } + ); } public void testNestedSameDocIdProcessedMultipleTime() throws Exception { @@ -531,8 +543,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { ); ensureGreen("idx4"); - client().prepareIndex("idx4") - .setId("1") + prepareIndex("idx4").setId("1") .setSource( jsonBuilder().startObject() .field("name", "product1") @@ -551,8 +562,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { .endObject() ) .get(); - client().prepareIndex("idx4") - .setId("2") + prepareIndex("idx4").setId("2") .setSource( jsonBuilder().startObject() .field("name", "product2") @@ -573,59 +583,62 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("idx4").addAggregation( - terms("category").field("categories") - .subAggregation(nested("property", "property").subAggregation(terms("property_id").field("property.id"))) - ).get(); - assertNoFailures(response); - assertHitCount(response, 2); - - Terms category = response.getAggregations().get("category"); - assertThat(category.getBuckets().size(), equalTo(4)); - - Terms.Bucket bucket = category.getBucketByKey("1"); - assertThat(bucket.getDocCount(), equalTo(2L)); - Nested property = bucket.getAggregations().get("property"); - assertThat(property.getDocCount(), equalTo(6L)); - Terms propertyId = property.getAggregations().get("property_id"); - assertThat(propertyId.getBuckets().size(), equalTo(5)); - assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2L)); - assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1L)); - - bucket = category.getBucketByKey("2"); - assertThat(bucket.getDocCount(), equalTo(2L)); - property = bucket.getAggregations().get("property"); - assertThat(property.getDocCount(), equalTo(6L)); - propertyId = property.getAggregations().get("property_id"); - assertThat(propertyId.getBuckets().size(), equalTo(5)); - assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2L)); - assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1L)); - - bucket = category.getBucketByKey("3"); - assertThat(bucket.getDocCount(), equalTo(1L)); - property = bucket.getAggregations().get("property"); - assertThat(property.getDocCount(), equalTo(3L)); - propertyId = property.getAggregations().get("property_id"); - assertThat(propertyId.getBuckets().size(), equalTo(3)); - assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); - - 
bucket = category.getBucketByKey("4"); - assertThat(bucket.getDocCount(), equalTo(1L)); - property = bucket.getAggregations().get("property"); - assertThat(property.getDocCount(), equalTo(3L)); - propertyId = property.getAggregations().get("property_id"); - assertThat(propertyId.getBuckets().size(), equalTo(3)); - assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); - assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + assertNoFailuresAndResponse( + prepareSearch("idx4").addAggregation( + terms("category").field("categories") + .subAggregation(nested("property", "property").subAggregation(terms("property_id").field("property.id"))) + ), + response -> { + assertHitCount(response, 2); + + Terms category = response.getAggregations().get("category"); + assertThat(category.getBuckets().size(), equalTo(4)); + + Terms.Bucket bucket = category.getBucketByKey("1"); + assertThat(bucket.getDocCount(), equalTo(2L)); + Nested property = bucket.getAggregations().get("property"); + assertThat(property.getDocCount(), equalTo(6L)); + Terms propertyId = property.getAggregations().get("property_id"); + assertThat(propertyId.getBuckets().size(), equalTo(5)); + assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2L)); + assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1L)); + + bucket = category.getBucketByKey("2"); + assertThat(bucket.getDocCount(), equalTo(2L)); + property = bucket.getAggregations().get("property"); + assertThat(property.getDocCount(), equalTo(6L)); + propertyId = property.getAggregations().get("property_id"); + assertThat(propertyId.getBuckets().size(), equalTo(5)); + assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2L)); + assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1L)); + + bucket = category.getBucketByKey("3"); + assertThat(bucket.getDocCount(), equalTo(1L)); + property = bucket.getAggregations().get("property"); + assertThat(property.getDocCount(), equalTo(3L)); + propertyId = property.getAggregations().get("property_id"); + assertThat(propertyId.getBuckets().size(), equalTo(3)); + assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + + bucket = category.getBucketByKey("4"); + assertThat(bucket.getDocCount(), equalTo(1L)); + property = bucket.getAggregations().get("property"); + assertThat(property.getDocCount(), equalTo(3L)); + propertyId = property.getAggregations().get("property_id"); + assertThat(propertyId.getBuckets().size(), equalTo(3)); + assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); + assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + } + ); } public void testFilterAggInsideNestedAgg() throws Exception { @@ -665,8 +678,7 @@ public void testFilterAggInsideNestedAgg() throws Exception { ) ); - 
client().prepareIndex("classes") - .setId("1") + prepareIndex("classes").setId("1") .setSource( jsonBuilder().startObject() .field("name", "QueryBuilder") @@ -705,8 +717,7 @@ public void testFilterAggInsideNestedAgg() throws Exception { .endObject() ) .get(); - client().prepareIndex("classes") - .setId("2") + prepareIndex("classes").setId("2") .setSource( jsonBuilder().startObject() .field("name", "Document") @@ -747,45 +758,52 @@ public void testFilterAggInsideNestedAgg() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("classes").addAggregation( - nested("to_method", "methods").subAggregation( - filter( - "num_string_params", - nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + assertNoFailuresAndResponse( + prepareSearch("classes").addAggregation( + nested("to_method", "methods").subAggregation( + filter( + "num_string_params", + nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + ) ) - ) - ).get(); - Nested toMethods = response.getAggregations().get("to_method"); - Filter numStringParams = toMethods.getAggregations().get("num_string_params"); - assertThat(numStringParams.getDocCount(), equalTo(3L)); - - response = prepareSearch("classes").addAggregation( - nested("to_method", "methods").subAggregation( - terms("return_type").field("methods.return_type") - .subAggregation( - filter( - "num_string_params", - nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + ), + response -> { + Nested toMethods = response.getAggregations().get("to_method"); + Filter numStringParams = toMethods.getAggregations().get("num_string_params"); + assertThat(numStringParams.getDocCount(), equalTo(3L)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("classes").addAggregation( + nested("to_method", "methods").subAggregation( + terms("return_type").field("methods.return_type") + .subAggregation( + filter( + "num_string_params", + nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + ) ) - ) - ) - ).get(); - toMethods = response.getAggregations().get("to_method"); - Terms terms = toMethods.getAggregations().get("return_type"); - Bucket bucket = terms.getBucketByKey("void"); - assertThat(bucket.getDocCount(), equalTo(3L)); - numStringParams = bucket.getAggregations().get("num_string_params"); - assertThat(numStringParams.getDocCount(), equalTo(2L)); - - bucket = terms.getBucketByKey("QueryBuilder"); - assertThat(bucket.getDocCount(), equalTo(2L)); - numStringParams = bucket.getAggregations().get("num_string_params"); - assertThat(numStringParams.getDocCount(), equalTo(1L)); - - bucket = terms.getBucketByKey("Query"); - assertThat(bucket.getDocCount(), equalTo(1L)); - numStringParams = bucket.getAggregations().get("num_string_params"); - assertThat(numStringParams.getDocCount(), equalTo(0L)); + ) + ), + response -> { + Nested toMethods = response.getAggregations().get("to_method"); + Terms terms = toMethods.getAggregations().get("return_type"); + Bucket bucket = terms.getBucketByKey("void"); + assertThat(bucket.getDocCount(), equalTo(3L)); + Filter numStringParams = bucket.getAggregations().get("num_string_params"); + assertThat(numStringParams.getDocCount(), equalTo(2L)); + + bucket = terms.getBucketByKey("QueryBuilder"); + assertThat(bucket.getDocCount(), equalTo(2L)); + numStringParams = bucket.getAggregations().get("num_string_params"); + assertThat(numStringParams.getDocCount(), 
equalTo(1L)); + + bucket = terms.getBucketByKey("Query"); + assertThat(bucket.getDocCount(), equalTo(1L)); + numStringParams = bucket.getAggregations().get("num_string_params"); + assertThat(numStringParams.getDocCount(), equalTo(0L)); + } + ); } public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java index 66978eba00e26..28c186c559dff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java @@ -9,8 +9,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.sampler.random.InternalRandomSampler; import org.elasticsearch.search.aggregations.bucket.sampler.random.RandomSamplerAggregationBuilder; @@ -24,6 +22,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.lessThan; @@ -61,14 +60,13 @@ public void setupSuiteScopeCluster() throws Exception { numericValue = randomDoubleBetween(5.0, 9.0, false); } builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(KEYWORD_VALUE, keywordValue) - .field(MONOTONIC_VALUE, monotonicValue) - .field(NUMERIC_VALUE, numericValue) - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(KEYWORD_VALUE, keywordValue) + .field(MONOTONIC_VALUE, monotonicValue) + .field(NUMERIC_VALUE, numericValue) + .endObject() + ) ); final double oldAvgMonotonic = avgMonotonic; @@ -87,40 +85,47 @@ public void setupSuiteScopeCluster() throws Exception { } public void testRandomSampler() { - double sampleMonotonicValue = 0.0; - double sampleNumericValue = 0.0; - double sampledDocCount = 0.0; + double[] sampleMonotonicValue = new double[1]; + double[] sampleNumericValue = new double[1]; + double[] sampledDocCount = new double[1]; for (int i = 0; i < NUM_SAMPLE_RUNS; i++) { - SearchRequest sampledRequest = prepareSearch("idx").addAggregation( - new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) - .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) - .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) - ).request(); - InternalRandomSampler sampler = client().search(sampledRequest).actionGet().getAggregations().get("sampler"); - sampleMonotonicValue += ((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(); - sampleNumericValue += ((Avg) sampler.getAggregations().get("mean_numeric")).getValue(); - sampledDocCount += sampler.getDocCount(); + assertResponse( + prepareSearch("idx").addAggregation( + new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + 
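
A side effect of moving assertions into lambdas, visible in testRandomSampler above and in MinDocCountIT earlier: Java lambdas may only capture effectively final locals, so mutable accumulators are boxed into one-element arrays (double[] sampleMonotonicValue = new double[1]) and loop variables are copied into final locals (final long finalMinDocCount = minDocCount) before use inside the consumer. A self-contained sketch of both idioms:

    // Lambdas capture variables, not storage: only (effectively) final locals may
    // be captured, so mutation goes through a final reference to a one-element array.
    public class CaptureDemo {
        public static void main(String[] args) {
            double[] total = new double[1];                // mutable box; the reference is final
            for (long minDocCount = 0; minDocCount < 5; ++minDocCount) {
                final long finalMinDocCount = minDocCount; // per-iteration final copy
                Runnable consumer = () -> total[0] += finalMinDocCount;
                consumer.run();
            }
            System.out.println(total[0]);                  // 0+1+2+3+4 = 10.0
        }
    }
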
.subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + ), + response -> { + InternalRandomSampler sampler = response.getAggregations().get("sampler"); + sampleMonotonicValue[0] += ((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(); + sampleNumericValue[0] += ((Avg) sampler.getAggregations().get("mean_numeric")).getValue(); + sampledDocCount[0] += sampler.getDocCount(); + } + ); } - sampledDocCount /= NUM_SAMPLE_RUNS; - sampleMonotonicValue /= NUM_SAMPLE_RUNS; - sampleNumericValue /= NUM_SAMPLE_RUNS; + sampledDocCount[0] /= NUM_SAMPLE_RUNS; + sampleMonotonicValue[0] /= NUM_SAMPLE_RUNS; + sampleNumericValue[0] /= NUM_SAMPLE_RUNS; double expectedDocCount = PROBABILITY * numDocs; // We're taking the mean of NUM_SAMPLE_RUNS for which each run has standard deviation // sqrt(PROBABILITY * numDocs) so the 6 sigma error, for which we expect 1 failure in // 500M runs, is 6 * sqrt(PROBABILITY * numDocs / NUM_SAMPLE_RUNS). double maxCountError = 6.0 * Math.sqrt(PROBABILITY * numDocs / NUM_SAMPLE_RUNS); - assertThat(Math.abs(sampledDocCount - expectedDocCount), lessThan(maxCountError)); - - SearchResponse trueValueResponse = prepareSearch("idx").addAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) - .addAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) - .get(); - double trueMonotonic = ((Avg) trueValueResponse.getAggregations().get("mean_monotonic")).getValue(); - double trueNumeric = ((Avg) trueValueResponse.getAggregations().get("mean_numeric")).getValue(); - double maxMonotonicError = 6.0 * Math.sqrt(varMonotonic / (numDocs * PROBABILITY * NUM_SAMPLE_RUNS)); - double maxNumericError = 6.0 * Math.sqrt(varNumeric / (numDocs * PROBABILITY * NUM_SAMPLE_RUNS)); - assertThat(Math.abs(sampleMonotonicValue - trueMonotonic), lessThan(maxMonotonicError)); - assertThat(Math.abs(sampleNumericValue - trueNumeric), lessThan(maxNumericError)); + assertThat(Math.abs(sampledDocCount[0] - expectedDocCount), lessThan(maxCountError)); + + assertResponse( + prepareSearch("idx").addAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .addAggregation(avg("mean_numeric").field(NUMERIC_VALUE)), + response -> { + double trueMonotonic = ((Avg) response.getAggregations().get("mean_monotonic")).getValue(); + double trueNumeric = ((Avg) response.getAggregations().get("mean_numeric")).getValue(); + double maxMonotonicError = 6.0 * Math.sqrt(varMonotonic / (numDocs * PROBABILITY * NUM_SAMPLE_RUNS)); + double maxNumericError = 6.0 * Math.sqrt(varNumeric / (numDocs * PROBABILITY * NUM_SAMPLE_RUNS)); + assertThat(Math.abs(sampleMonotonicValue[0] - trueMonotonic), lessThan(maxMonotonicError)); + assertThat(Math.abs(sampleNumericValue[0] - trueNumeric), lessThan(maxNumericError)); + } + ); } public void testRandomSamplerHistogram() { @@ -129,28 +134,32 @@ public void testRandomSamplerHistogram() { Map sampledDocCount = new HashMap<>(); for (int i = 0; i < NUM_SAMPLE_RUNS; i++) { - SearchRequest sampledRequest = prepareSearch("idx").addAggregation( - new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) - .subAggregation( - histogram("histo").field(NUMERIC_VALUE) - .interval(5.0) - .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) - .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) - ) - ).request(); - InternalRandomSampler sampler = client().search(sampledRequest).actionGet().getAggregations().get("sampler"); - Histogram histo = sampler.getAggregations().get("histo"); - for (Histogram.Bucket bucket : histo.getBuckets()) { - sampleMonotonicValue.compute( - 
bucket.getKeyAsString(), - (k, v) -> ((Avg) bucket.getAggregations().get("mean_monotonic")).getValue() + (v == null ? 0 : v) - ); - sampleNumericValue.compute( - bucket.getKeyAsString(), - (k, v) -> ((Avg) bucket.getAggregations().get("mean_numeric")).getValue() + (v == null ? 0 : v) - ); - sampledDocCount.compute(bucket.getKeyAsString(), (k, v) -> bucket.getDocCount() + (v == null ? 0 : v)); - } + assertResponse( + prepareSearch("idx").addAggregation( + new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) + .subAggregation( + histogram("histo").field(NUMERIC_VALUE) + .interval(5.0) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + ) + ), + response -> { + InternalRandomSampler sampler = response.getAggregations().get("sampler"); + Histogram histo = sampler.getAggregations().get("histo"); + for (Histogram.Bucket bucket : histo.getBuckets()) { + sampleMonotonicValue.compute( + bucket.getKeyAsString(), + (k, v) -> ((Avg) bucket.getAggregations().get("mean_monotonic")).getValue() + (v == null ? 0 : v) + ); + sampleNumericValue.compute( + bucket.getKeyAsString(), + (k, v) -> ((Avg) bucket.getAggregations().get("mean_numeric")).getValue() + (v == null ? 0 : v) + ); + sampledDocCount.compute(bucket.getKeyAsString(), (k, v) -> bucket.getDocCount() + (v == null ? 0 : v)); + } + } + ); } for (String key : sampledDocCount.keySet()) { sampledDocCount.put(key, sampledDocCount.get(key) / NUM_SAMPLE_RUNS); @@ -158,25 +167,29 @@ public void testRandomSamplerHistogram() { sampleMonotonicValue.put(key, sampleMonotonicValue.get(key) / NUM_SAMPLE_RUNS); } - SearchResponse trueValueResponse = prepareSearch("idx").addAggregation( - histogram("histo").field(NUMERIC_VALUE) - .interval(5.0) - .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) - .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) - ).get(); - Histogram histogram = trueValueResponse.getAggregations().get("histo"); - for (Histogram.Bucket bucket : histogram.getBuckets()) { - long numDocs = bucket.getDocCount(); - // Note the true count is estimated by dividing the bucket sample doc count by PROBABILITY. - double maxCountError = 6.0 * Math.sqrt(numDocs / NUM_SAMPLE_RUNS / (0.5 * PROBABILITY)); - assertThat(Math.abs(sampledDocCount.get(bucket.getKeyAsString()) - numDocs), lessThan(maxCountError)); - double trueMonotonic = ((Avg) bucket.getAggregations().get("mean_monotonic")).getValue(); - double trueNumeric = ((Avg) bucket.getAggregations().get("mean_numeric")).getValue(); - double maxMonotonicError = 6.0 * Math.sqrt(varMonotonic / (numDocs * 0.5 * PROBABILITY * NUM_SAMPLE_RUNS)); - double maxNumericError = 6.0 * Math.sqrt(varNumeric / (numDocs * 0.5 * PROBABILITY * NUM_SAMPLE_RUNS)); - assertThat(Math.abs(sampleMonotonicValue.get(bucket.getKeyAsString()) - trueMonotonic), lessThan(maxMonotonicError)); - assertThat(Math.abs(sampleNumericValue.get(bucket.getKeyAsString()) - trueNumeric), lessThan(maxNumericError)); - } + assertResponse( + prepareSearch("idx").addAggregation( + histogram("histo").field(NUMERIC_VALUE) + .interval(5.0) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + ), + response -> { + Histogram histogram = response.getAggregations().get("histo"); + for (Histogram.Bucket bucket : histogram.getBuckets()) { + long numDocs = bucket.getDocCount(); + // Note the true count is estimated by dividing the bucket sample doc count by PROBABILITY. 
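
The tolerances in these sampler tests follow directly from the comment retained in the diff: one run keeps about PROBABILITY * numDocs documents with standard deviation near sqrt(PROBABILITY * numDocs), so the mean over NUM_SAMPLE_RUNS runs has standard deviation sqrt(PROBABILITY * numDocs / NUM_SAMPLE_RUNS), and the assertions allow six of those (roughly one spurious failure in 500M runs). A worked example with illustrative constants (these values are assumptions, not the test's actual settings):

    // Worked 6-sigma bound; probability, numDocs and numSampleRuns are made up.
    double probability = 0.5;
    long numDocs = 10_000;
    int numSampleRuns = 25;
    double expectedDocCount = probability * numDocs;                     // 5000.0
    double perRunSigma = Math.sqrt(probability * numDocs);               // ~70.7 docs per run
    double meanSigma = Math.sqrt(probability * numDocs / numSampleRuns); // ~14.1 for the mean
    double maxCountError = 6.0 * meanSigma;                              // ~84.9 allowed deviation
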
+ double maxCountError = 6.0 * Math.sqrt(numDocs / NUM_SAMPLE_RUNS / (0.5 * PROBABILITY)); + assertThat(Math.abs(sampledDocCount.get(bucket.getKeyAsString()) - numDocs), lessThan(maxCountError)); + double trueMonotonic = ((Avg) bucket.getAggregations().get("mean_monotonic")).getValue(); + double trueNumeric = ((Avg) bucket.getAggregations().get("mean_numeric")).getValue(); + double maxMonotonicError = 6.0 * Math.sqrt(varMonotonic / (numDocs * 0.5 * PROBABILITY * NUM_SAMPLE_RUNS)); + double maxNumericError = 6.0 * Math.sqrt(varNumeric / (numDocs * 0.5 * PROBABILITY * NUM_SAMPLE_RUNS)); + assertThat(Math.abs(sampleMonotonicValue.get(bucket.getKeyAsString()) - trueMonotonic), lessThan(maxMonotonicError)); + assertThat(Math.abs(sampleNumericValue.get(bucket.getKeyAsString()) - trueNumeric), lessThan(maxNumericError)); + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java index 742d403ba42b0..10e3649e9f161 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; @@ -41,6 +40,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -91,24 +91,22 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, i + 1) - .startArray(MULTI_VALUED_FIELD_NAME) - .value(i + 1) - .value(i + 2) - .endArray() - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i + 1) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i + 1) + .value(i + 2) + .endArray() + .endObject() + ) ); } createIndex("idx_unmapped"); prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource( jsonBuilder().startObject() // shift sequence by 1, to ensure we have negative values, and value 3 on the edge of the tested ranges @@ -123,312 +121,319 @@ public void setupSuiteScopeCluster() throws Exception { prepareCreate("old_index").setMapping("distance", "type=double", "route_length_miles", "type=alias,path=distance").get(); prepareCreate("new_index").setMapping("route_length_miles", "type=double").get(); - builders.add(client().prepareIndex("old_index").setSource("distance", 42.0)); - 
builders.add(client().prepareIndex("old_index").setSource("distance", 50.5)); - builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); - builders.add(client().prepareIndex("new_index").setSource(Collections.emptyMap())); + builders.add(prepareIndex("old_index").setSource("distance", 42.0)); + builders.add(prepareIndex("old_index").setSource("distance", 50.5)); + builders.add(prepareIndex("new_index").setSource("route_length_miles", 100.2)); + builders.add(prepareIndex("new_index").setSource(Collections.emptyMap())); indexRandom(true, builders); ensureSearchable(); } public void testRangeAsSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").field(MULTI_VALUED_FIELD_NAME) - .size(100) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) - ).get(); - - assertNoFailures(response); - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), equalTo(numDocs + 1)); - for (int i = 1; i < numDocs + 2; ++i) { - Terms.Bucket bucket = terms.getBucketByKey("" + i); - assertThat(bucket, notNullValue()); - final long docCount = i == 1 || i == numDocs + 1 ? 1 : 2; - assertThat(bucket.getDocCount(), equalTo(docCount)); - Range range = bucket.getAggregations().get("range"); - List buckets = range.getBuckets(); - Range.Bucket rangeBucket = buckets.get(0); - assertThat(rangeBucket.getKey(), equalTo("*-3.0")); - assertThat(rangeBucket.getKeyAsString(), equalTo("*-3.0")); - assertThat(rangeBucket, notNullValue()); - assertThat(rangeBucket.getFromAsString(), nullValue()); - assertThat(rangeBucket.getToAsString(), equalTo("3.0")); - if (i == 1 || i == 3) { - assertThat(rangeBucket.getDocCount(), equalTo(1L)); - } else if (i == 2) { - assertThat(rangeBucket.getDocCount(), equalTo(2L)); - } else { - assertThat(rangeBucket.getDocCount(), equalTo(0L)); - } - rangeBucket = buckets.get(1); - assertThat(rangeBucket.getKey(), equalTo("3.0-6.0")); - assertThat(rangeBucket.getKeyAsString(), equalTo("3.0-6.0")); - assertThat(rangeBucket, notNullValue()); - assertThat(rangeBucket.getFromAsString(), equalTo("3.0")); - assertThat(rangeBucket.getToAsString(), equalTo("6.0")); - if (i == 3 || i == 6) { - assertThat(rangeBucket.getDocCount(), equalTo(1L)); - } else if (i == 4 || i == 5) { - assertThat(rangeBucket.getDocCount(), equalTo(2L)); - } else { - assertThat(rangeBucket.getDocCount(), equalTo(0L)); - } - rangeBucket = buckets.get(2); - assertThat(rangeBucket.getKey(), equalTo("6.0-*")); - assertThat(rangeBucket.getKeyAsString(), equalTo("6.0-*")); - assertThat(rangeBucket, notNullValue()); - assertThat(rangeBucket.getFromAsString(), equalTo("6.0")); - assertThat(rangeBucket.getToAsString(), nullValue()); - if (i == 6 || i == numDocs + 1) { - assertThat(rangeBucket.getDocCount(), equalTo(1L)); - } else if (i < 6) { - assertThat(rangeBucket.getDocCount(), equalTo(0L)); - } else { - assertThat(rangeBucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").field(MULTI_VALUED_FIELD_NAME) + .size(100) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, 
 
     public void testRangeAsSubAggregation() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            terms("terms").field(MULTI_VALUED_FIELD_NAME)
-                .size(100)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-                .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6))
-        ).get();
-
-        assertNoFailures(response);
-        Terms terms = response.getAggregations().get("terms");
-        assertThat(terms, notNullValue());
-        assertThat(terms.getBuckets().size(), equalTo(numDocs + 1));
-        for (int i = 1; i < numDocs + 2; ++i) {
-            Terms.Bucket bucket = terms.getBucketByKey("" + i);
-            assertThat(bucket, notNullValue());
-            final long docCount = i == 1 || i == numDocs + 1 ? 1 : 2;
-            assertThat(bucket.getDocCount(), equalTo(docCount));
-            Range range = bucket.getAggregations().get("range");
-            List<? extends Range.Bucket> buckets = range.getBuckets();
-            Range.Bucket rangeBucket = buckets.get(0);
-            assertThat(rangeBucket.getKey(), equalTo("*-3.0"));
-            assertThat(rangeBucket.getKeyAsString(), equalTo("*-3.0"));
-            assertThat(rangeBucket, notNullValue());
-            assertThat(rangeBucket.getFromAsString(), nullValue());
-            assertThat(rangeBucket.getToAsString(), equalTo("3.0"));
-            if (i == 1 || i == 3) {
-                assertThat(rangeBucket.getDocCount(), equalTo(1L));
-            } else if (i == 2) {
-                assertThat(rangeBucket.getDocCount(), equalTo(2L));
-            } else {
-                assertThat(rangeBucket.getDocCount(), equalTo(0L));
-            }
-            rangeBucket = buckets.get(1);
-            assertThat(rangeBucket.getKey(), equalTo("3.0-6.0"));
-            assertThat(rangeBucket.getKeyAsString(), equalTo("3.0-6.0"));
-            assertThat(rangeBucket, notNullValue());
-            assertThat(rangeBucket.getFromAsString(), equalTo("3.0"));
-            assertThat(rangeBucket.getToAsString(), equalTo("6.0"));
-            if (i == 3 || i == 6) {
-                assertThat(rangeBucket.getDocCount(), equalTo(1L));
-            } else if (i == 4 || i == 5) {
-                assertThat(rangeBucket.getDocCount(), equalTo(2L));
-            } else {
-                assertThat(rangeBucket.getDocCount(), equalTo(0L));
-            }
-            rangeBucket = buckets.get(2);
-            assertThat(rangeBucket.getKey(), equalTo("6.0-*"));
-            assertThat(rangeBucket.getKeyAsString(), equalTo("6.0-*"));
-            assertThat(rangeBucket, notNullValue());
-            assertThat(rangeBucket.getFromAsString(), equalTo("6.0"));
-            assertThat(rangeBucket.getToAsString(), nullValue());
-            if (i == 6 || i == numDocs + 1) {
-                assertThat(rangeBucket.getDocCount(), equalTo(1L));
-            } else if (i < 6) {
-                assertThat(rangeBucket.getDocCount(), equalTo(0L));
-            } else {
-                assertThat(rangeBucket.getDocCount(), equalTo(2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                terms("terms").field(MULTI_VALUED_FIELD_NAME)
+                    .size(100)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+                    .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6))
+            ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                assertThat(terms, notNullValue());
+                assertThat(terms.getBuckets().size(), equalTo(numDocs + 1));
+                for (int i = 1; i < numDocs + 2; ++i) {
+                    Terms.Bucket bucket = terms.getBucketByKey("" + i);
+                    assertThat(bucket, notNullValue());
+                    final long docCount = i == 1 || i == numDocs + 1 ? 1 : 2;
+                    assertThat(bucket.getDocCount(), equalTo(docCount));
+                    Range range = bucket.getAggregations().get("range");
+                    List<? extends Range.Bucket> buckets = range.getBuckets();
+                    Range.Bucket rangeBucket = buckets.get(0);
+                    assertThat(rangeBucket.getKey(), equalTo("*-3.0"));
+                    assertThat(rangeBucket.getKeyAsString(), equalTo("*-3.0"));
+                    assertThat(rangeBucket, notNullValue());
+                    assertThat(rangeBucket.getFromAsString(), nullValue());
+                    assertThat(rangeBucket.getToAsString(), equalTo("3.0"));
+                    if (i == 1 || i == 3) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(1L));
+                    } else if (i == 2) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(2L));
+                    } else {
+                        assertThat(rangeBucket.getDocCount(), equalTo(0L));
+                    }
+                    rangeBucket = buckets.get(1);
+                    assertThat(rangeBucket.getKey(), equalTo("3.0-6.0"));
+                    assertThat(rangeBucket.getKeyAsString(), equalTo("3.0-6.0"));
+                    assertThat(rangeBucket, notNullValue());
+                    assertThat(rangeBucket.getFromAsString(), equalTo("3.0"));
+                    assertThat(rangeBucket.getToAsString(), equalTo("6.0"));
+                    if (i == 3 || i == 6) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(1L));
+                    } else if (i == 4 || i == 5) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(2L));
+                    } else {
+                        assertThat(rangeBucket.getDocCount(), equalTo(0L));
+                    }
+                    rangeBucket = buckets.get(2);
+                    assertThat(rangeBucket.getKey(), equalTo("6.0-*"));
+                    assertThat(rangeBucket.getKeyAsString(), equalTo("6.0-*"));
+                    assertThat(rangeBucket, notNullValue());
+                    assertThat(rangeBucket.getFromAsString(), equalTo("6.0"));
+                    assertThat(rangeBucket.getToAsString(), nullValue());
+                    if (i == 6 || i == numDocs + 1) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(1L));
+                    } else if (i < 6) {
+                        assertThat(rangeBucket.getDocCount(), equalTo(0L));
+                    } else {
+                        assertThat(rangeBucket.getDocCount(), equalTo(2L));
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testSingleValueField() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(3L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+            }
+        );
     }
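For readers unfamiliar with the numeric range aggregation these tests exercise: each requested range becomes one bucket, keyed "<from>-<to>" with "*" standing in for an unbounded edge; from is inclusive, to is exclusive, and unbounded edges come back as infinities with null from/to strings. A sketch of the same request outside the test framework, assuming a connected Client named client and an index "idx" with a numeric field "value" (a fragment, not a full program):

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.internal.Client;
    import org.elasticsearch.search.aggregations.AggregationBuilders;
    import org.elasticsearch.search.aggregations.bucket.range.Range;

    SearchResponse resp = client.prepareSearch("idx")
        .addAggregation(AggregationBuilders.range("range").field("value").addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6))
        .get();
    Range range = resp.getAggregations().get("range");
    for (Range.Bucket b : range.getBuckets()) {
        // For "*-3.0": getFrom() is Double.NEGATIVE_INFINITY and getFromAsString() is null.
        System.out.println(b.getKeyAsString() + " -> " + b.getDocCount());
    }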
 
     public void testSingleValueFieldWithFormat() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6).format("#")
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3-6"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3"));
-        assertThat(bucket.getToAsString(), equalTo("6"));
-        assertThat(bucket.getDocCount(), equalTo(3L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6).format("#")
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3-6"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3"));
+                assertThat(bucket.getToAsString(), equalTo("6"));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+            }
+        );
     }
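The hunk above also shows what format("#") does: the pattern is applied when rendering the numeric bounds, so keys and from/to strings drop the decimal while the numeric getFrom()/getTo() values are unchanged. Roughly (field name "value" is a placeholder):

    // Same ranges, formatted keys: "*-3", "3-6", "6-*" instead of "*-3.0", "3.0-6.0", "6.0-*".
    AggregationBuilders.range("range").field("value")
        .addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
        .format("#");  // decimal-format-style pattern for the bucket bounds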
equalTo("r3")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo("r1", 3).addRange("r2", 3, 6).addUnboundedFrom("r3", 6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("r1")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("r2")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(3L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("r3")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + } + ); } public void testSingleValuedFieldWithSubAggregation() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value"); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(2L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(3.0)); // 1 + 2 - 
assertThat(propertiesKeys[0], equalTo("*-3.0")); - assertThat(propertiesDocCounts[0], equalTo(2L)); - assertThat(propertiesCounts[0], equalTo(3.0)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(3L)); - sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(12.0)); // 3 + 4 + 5 - assertThat(propertiesKeys[1], equalTo("3.0-6.0")); - assertThat(propertiesDocCounts[1], equalTo(3L)); - assertThat(propertiesCounts[1], equalTo(12.0)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); - sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - long total = 0; - for (int i = 5; i < numDocs; ++i) { - total += i + 1; - } - assertThat(sum.value(), equalTo((double) total)); - assertThat(propertiesKeys[2], equalTo("6.0-*")); - assertThat(propertiesDocCounts[2], equalTo(numDocs - 5L)); - assertThat(propertiesCounts[2], equalTo((double) total)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME) + .addUnboundedTo(3) + .addRange(3, 6) + .addUnboundedFrom(6) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + assertThat(range.getName(), equalTo("range")); + List buckets = range.getBuckets(); + assertThat(range.getBuckets().size(), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value"); + + Range.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("*-3.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); + assertThat(bucket.getFromAsString(), nullValue()); + assertThat(bucket.getToAsString(), equalTo("3.0")); + assertThat(bucket.getDocCount(), equalTo(2L)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(3.0)); // 1 + 2 + assertThat(propertiesKeys[0], equalTo("*-3.0")); + assertThat(propertiesDocCounts[0], equalTo(2L)); + assertThat(propertiesCounts[0], equalTo(3.0)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("3.0-6.0")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); + assertThat(bucket.getFromAsString(), equalTo("3.0")); + assertThat(bucket.getToAsString(), 
equalTo("6.0")); + assertThat(bucket.getDocCount(), equalTo(3L)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(12.0)); // 3 + 4 + 5 + assertThat(propertiesKeys[1], equalTo("3.0-6.0")); + assertThat(propertiesDocCounts[1], equalTo(3L)); + assertThat(propertiesCounts[1], equalTo(12.0)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("6.0-*")); + assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); + assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(bucket.getFromAsString(), equalTo("6.0")); + assertThat(bucket.getToAsString(), nullValue()); + assertThat(bucket.getDocCount(), equalTo(numDocs - 5L)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long total = 0; + for (int i = 5; i < numDocs; ++i) { + total += i + 1; + } + assertThat(sum.value(), equalTo((double) total)); + assertThat(propertiesKeys[2], equalTo("6.0-*")); + assertThat(propertiesDocCounts[2], equalTo(numDocs - 5L)); + assertThat(propertiesCounts[2], equalTo((double) total)); + } + ); } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6) - ).get(); - - assertNoFailures(response); - - Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - assertThat(range.getName(), equalTo("range")); - List buckets = range.getBuckets(); - assertThat(range.getBuckets().size(), equalTo(3)); - - Range.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("*-3.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0)); - assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("3.0")); - assertThat(bucket.getDocCount(), equalTo(1L)); // 2 - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("3.0-6.0")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0)); - assertThat(bucket.getFromAsString(), equalTo("3.0")); - assertThat(bucket.getToAsString(), equalTo("6.0")); - assertThat(bucket.getDocCount(), equalTo(3L)); // 3, 4, 5 - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("6.0-*")); - assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0)); - assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(bucket.getFromAsString(), equalTo("6.0")); - assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + .addUnboundedTo(3) + .addRange(3, 6) + .addUnboundedFrom(6) + ), + response -> { + Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + 
 
     public void testSingleValuedFieldWithValueScript() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").field(SINGLE_VALUED_FIELD_NAME)
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
-                .addUnboundedTo(3)
-                .addRange(3, 6)
-                .addUnboundedFrom(6)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3.0"));
-        assertThat(bucket.getDocCount(), equalTo(1L)); // 2
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(3L)); // 3, 4, 5
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                range("range").field(SINGLE_VALUED_FIELD_NAME)
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
+                    .addUnboundedTo(3)
+                    .addRange(3, 6)
+                    .addUnboundedFrom(6)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3.0"));
+                assertThat(bucket.getDocCount(), equalTo(1L)); // 2
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(3L)); // 3, 4, 5
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+            }
+        );
     }
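The value-script variant above deserves a note: the script rewrites each field value before bucketing, so with "_value + 1" a stored 1 is bucketed as 2 and the "*-3.0" bucket shrinks from two documents to one while "6.0-*" grows to numDocs - 4. CustomScriptPlugin is this suite's mock script engine; in isolation the aggregation is roughly (engine name "mockscript" and field name "value" are placeholders):

    import java.util.Collections;

    import org.elasticsearch.script.Script;
    import org.elasticsearch.script.ScriptType;

    import static org.elasticsearch.search.aggregations.AggregationBuilders.range;

    // Values are shifted by one before being assigned to ranges.
    range("range").field("value")
        .script(new Script(ScriptType.INLINE, "mockscript", "_value + 1", Collections.emptyMap()))
        .addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6);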
 
     /*
@@ -445,44 +450,45 @@ public void testSingleValuedFieldWithValueScript() throws Exception {
      */
 
     public void testMultiValuedField() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(4L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(4L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+            }
+        );
     }
 
     /*
@@ -499,48 +505,49 @@ public void testMultiValuedField() throws Exception {
      */
 
     public void testMultiValuedFieldWithValueScript() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").field(MULTI_VALUED_FIELD_NAME)
-                .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
-                .addUnboundedTo(3)
-                .addRange(3, 6)
-                .addUnboundedFrom(6)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3.0"));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(4L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 3L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                range("range").field(MULTI_VALUED_FIELD_NAME)
+                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
+                    .addUnboundedTo(3)
+                    .addRange(3, 6)
+                    .addUnboundedFrom(6)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3.0"));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(4L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 3L));
+            }
+        );
     }
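A point the multi-valued tests rely on but never state: a document is counted at most once per bucket, even when several of its values fall into the same range, and the same document may still appear in several buckets. A sketch under that assumption (field name "values" is a placeholder):

    // A doc with values [2, 3] is counted once in "*-3.0" (via 2) and once in "3.0-6.0" (via 3),
    // so bucket doc counts can sum to more than the number of documents.
    AggregationBuilders.range("range").field("values").addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6);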
 
     /*
@@ -567,76 +574,74 @@ public void testScriptSingleValue() throws Exception {
             "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value",
             Collections.emptyMap()
         );
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(3L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+            }
+        );
     }
 
     public void testEmptyRange() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(-1).addUnboundedFrom(1000)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(2));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*--1.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(-1.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("-1.0"));
-        assertThat(bucket.getDocCount(), equalTo(0L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("1000.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000d));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("1000.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(-1).addUnboundedFrom(1000)),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(2));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*--1.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(-1.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("-1.0"));
+                assertThat(bucket.getDocCount(), equalTo(0L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("1000.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000d));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("1000.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(0L));
+            }
+        );
     }
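testEmptyRange above pins down another behavior: buckets are defined by the request, not by the data, so ranges that match nothing still come back, just with docCount 0. Roughly (field name "values" is a placeholder):

    // Both buckets exist in the response even though no document falls inside them.
    AggregationBuilders.range("range").field("values").addUnboundedTo(-1).addUnboundedFrom(1000);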
 
     public void testNoRangesInQuery() {
@@ -658,44 +663,43 @@ public void testScriptMultiValued() throws Exception {
             Collections.emptyMap()
         );
 
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(4L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(4L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+            }
+        );
     }
 
     /*
@@ -716,167 +720,172 @@ public void testScriptMultiValued() throws Exception {
      */
 
     public void testUnmapped() throws Exception {
-        SearchResponse response = prepareSearch("idx_unmapped").addAggregation(
-            range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3.0"));
-        assertThat(bucket.getDocCount(), equalTo(0L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(0L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped").addAggregation(
+                range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3.0"));
+                assertThat(bucket.getDocCount(), equalTo(0L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(0L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(0L));
+            }
+        );
     }
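The unmapped tests apply the same principle to mappings: on an index where the field does not exist every bucket is returned with docCount 0, and when a mapped and an unmapped index are queried together the counts simply come from the mapped one. Roughly, assuming a connected Client named client:

    // Searching "idx" plus "idx_unmapped" yields the same bucket counts as "idx" alone;
    // the unmapped index contributes empty buckets rather than an error.
    client.prepareSearch("idx", "idx_unmapped")
        .addAggregation(AggregationBuilders.range("range").field("value").addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6));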
 
     public void testPartiallyUnmapped() throws Exception {
         clusterAdmin().prepareHealth("idx_unmapped").setWaitForYellowStatus().get();
-        SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation(
-            range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-3.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("3.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(3L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("6.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("6.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx", "idx_unmapped").addAggregation(
+                range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-3.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("3.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(3L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("6.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("6.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+            }
+        );
     }
 
     public void testOverlappingRanges() throws Exception {
-        SearchResponse response = prepareSearch("idx").addAggregation(
-            range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(5).addRange(3, 6).addRange(4, 5).addUnboundedFrom(4)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(range.getBuckets().size(), equalTo(4));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-5.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0));
-        assertThat(bucket.getFromAsString(), nullValue());
-        assertThat(bucket.getToAsString(), equalTo("5.0"));
-        assertThat(bucket.getDocCount(), equalTo(4L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("3.0-6.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
-        assertThat(bucket.getFromAsString(), equalTo("3.0"));
-        assertThat(bucket.getToAsString(), equalTo("6.0"));
-        assertThat(bucket.getDocCount(), equalTo(4L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("4.0-5.0"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0));
-        assertThat(bucket.getFromAsString(), equalTo("4.0"));
-        assertThat(bucket.getToAsString(), equalTo("5.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(3);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("4.0-*"));
-        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0));
-        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
-        assertThat(bucket.getFromAsString(), equalTo("4.0"));
-        assertThat(bucket.getToAsString(), nullValue());
-        assertThat(bucket.getDocCount(), equalTo(numDocs - 2L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(5).addRange(3, 6).addRange(4, 5).addUnboundedFrom(4)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(range.getBuckets().size(), equalTo(4));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-5.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0));
+                assertThat(bucket.getFromAsString(), nullValue());
+                assertThat(bucket.getToAsString(), equalTo("5.0"));
+                assertThat(bucket.getDocCount(), equalTo(4L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("3.0-6.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+                assertThat(bucket.getFromAsString(), equalTo("3.0"));
+                assertThat(bucket.getToAsString(), equalTo("6.0"));
+                assertThat(bucket.getDocCount(), equalTo(4L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("4.0-5.0"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0));
+                assertThat(bucket.getFromAsString(), equalTo("4.0"));
+                assertThat(bucket.getToAsString(), equalTo("5.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(3);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("4.0-*"));
+                assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0));
+                assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+                assertThat(bucket.getFromAsString(), equalTo("4.0"));
+                assertThat(bucket.getToAsString(), nullValue());
+                assertThat(bucket.getDocCount(), equalTo(numDocs - 2L));
+            }
+        );
     }
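Overlapping ranges are legal, as the test above demonstrates: each bucket is evaluated independently, so one value can put a document into several buckets at once. Roughly (field name "values" is a placeholder):

    // A document with value 4 lands in all four buckets: "*-5.0", "3.0-6.0", "4.0-5.0" and "4.0-*".
    AggregationBuilders.range("range").field("values")
        .addUnboundedTo(5).addRange(3, 6).addRange(4, 5).addUnboundedFrom(4);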
 
     public void testEmptyAggregation() throws Exception {
-        SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery())
-            .addAggregation(
-                histogram("histo").field(SINGLE_VALUED_FIELD_NAME)
-                    .interval(1L)
-                    .minDocCount(0)
-                    .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addRange("0-2", 0.0, 2.0))
-            )
-            .get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L));
-        Histogram histo = searchResponse.getAggregations().get("histo");
-        assertThat(histo, Matchers.notNullValue());
-        Histogram.Bucket bucket = histo.getBuckets().get(1);
-        assertThat(bucket, Matchers.notNullValue());
-
-        Range range = bucket.getAggregations().get("range");
-        // TODO: use diamond once JI-9019884 is fixed
-        List<Range.Bucket> buckets = new ArrayList<>(range.getBuckets());
-        assertThat(range, Matchers.notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        assertThat(buckets.size(), is(1));
-        assertThat(buckets.get(0).getKey(), equalTo("0-2"));
-        assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0));
-        assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(2.0));
-        assertThat(buckets.get(0).getFromAsString(), equalTo("0.0"));
-        assertThat(buckets.get(0).getToAsString(), equalTo("2.0"));
-        assertThat(buckets.get(0).getDocCount(), equalTo(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("empty_bucket_idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    histogram("histo").field(SINGLE_VALUED_FIELD_NAME)
+                        .interval(1L)
+                        .minDocCount(0)
+                        .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addRange("0-2", 0.0, 2.0))
+                ),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(2L));
+                Histogram histo = response.getAggregations().get("histo");
+                assertThat(histo, Matchers.notNullValue());
+                Histogram.Bucket bucket = histo.getBuckets().get(1);
+                assertThat(bucket, Matchers.notNullValue());
+
+                Range range = bucket.getAggregations().get("range");
+                // TODO: use diamond once JI-9019884 is fixed
+                List<Range.Bucket> buckets = new ArrayList<>(range.getBuckets());
+                assertThat(range, Matchers.notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                assertThat(buckets.size(), is(1));
+                assertThat(buckets.get(0).getKey(), equalTo("0-2"));
+                assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0));
+                assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(2.0));
+                assertThat(buckets.get(0).getFromAsString(), equalTo("0.0"));
+                assertThat(buckets.get(0).getToAsString(), equalTo("2.0"));
+                assertThat(buckets.get(0).getDocCount(), equalTo(0L));
+            }
+        );
     }
@@ -891,8 +900,8 @@ public void testScriptCaching() throws Exception {
         );
         indexRandom(
             true,
-            client().prepareIndex("cache_test_idx").setId("1").setSource(jsonBuilder().startObject().field("i", 1).endObject()),
-            client().prepareIndex("cache_test_idx").setId("2").setSource(jsonBuilder().startObject().field("i", 2).endObject())
+            prepareIndex("cache_test_idx").setId("1").setSource(jsonBuilder().startObject().field("i", 1).endObject()),
+            prepareIndex("cache_test_idx").setId("2").setSource(jsonBuilder().startObject().field("i", 2).endObject())
         );
 
         // Make sure we are starting with a clear cache
@@ -908,14 +917,14 @@ public void testScriptCaching() throws Exception {
         // Test that a request using a nondeterministic script does not get cached
         Map<String, Object> params = new HashMap<>();
        params.put("fieldname", "date");
-        SearchResponse r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                range("foo").field("i")
-                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))
-                    .addRange(0, 10)
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    range("foo").field("i")
+                        .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))
+                        .addRange(0, 10)
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -927,14 +936,14 @@ public void testScriptCaching() throws Exception {
        );
 
         // Test that a request using a deterministic script gets cached
-        r = prepareSearch("cache_test_idx").setSize(0)
-            .addAggregation(
-                range("foo").field("i")
-                    .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
-                    .addRange(0, 10)
-            )
-            .get();
-        assertNoFailures(r);
+        assertNoFailures(
+            prepareSearch("cache_test_idx").setSize(0)
+                .addAggregation(
+                    range("foo").field("i")
+                        .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))
+                        .addRange(0, 10)
+                )
+        );
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -946,8 +955,7 @@ public void testScriptCaching() throws Exception {
        );
 
         // Ensure that non-scripted requests are cached as normal
-        r = prepareSearch("cache_test_idx").setSize(0).addAggregation(range("foo").field("i").addRange(0, 10)).get();
-        assertNoFailures(r);
+        assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(range("foo").field("i").addRange(0, 10)));
 
         assertThat(
             indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(),
@@ -960,60 +968,62 @@ public void testScriptCaching() throws Exception {
     }
 
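The caching hunks above keep the test's logic intact: a request whose script is nondeterministic (Math.random()) must never be served from the request cache, while a deterministic value script and a script-free request are cacheable. The observable signal is the index's request-cache statistics, read as in this fragment from the test:

    // Cache hit/miss counters for the index, as used by testScriptCaching.
    long hits = indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true)
        .get().getTotal().getRequestCache().getHitCount();
    long misses = indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true)
        .get().getTotal().getRequestCache().getMissCount();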
     public void testFieldAlias() {
-        SearchResponse response = prepareSearch("old_index", "new_index").addAggregation(
-            range("range").field("route_length_miles").addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-50.0"));
-        assertThat(bucket.getDocCount(), equalTo(1L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("50.0-150.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("150.0-*"));
-        assertThat(bucket.getDocCount(), equalTo(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("old_index", "new_index").addAggregation(
+                range("range").field("route_length_miles").addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-50.0"));
+                assertThat(bucket.getDocCount(), equalTo(1L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("50.0-150.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("150.0-*"));
+                assertThat(bucket.getDocCount(), equalTo(0L));
+            }
+        );
     }
 
     public void testFieldAliasWithMissingValue() {
-        SearchResponse response = prepareSearch("old_index", "new_index").addAggregation(
-            range("range").field("route_length_miles").missing(0.0).addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0)
-        ).get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        assertThat(range, notNullValue());
-        assertThat(range.getName(), equalTo("range"));
-        List<? extends Range.Bucket> buckets = range.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-
-        Range.Bucket bucket = buckets.get(0);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("*-50.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(1);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("50.0-150.0"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-
-        bucket = buckets.get(2);
-        assertThat(bucket, notNullValue());
-        assertThat(bucket.getKey(), equalTo("150.0-*"));
-        assertThat(bucket.getDocCount(), equalTo(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("old_index", "new_index").addAggregation(
+                range("range").field("route_length_miles").missing(0.0).addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0)
+            ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                assertThat(range, notNullValue());
+                assertThat(range.getName(), equalTo("range"));
+                List<? extends Range.Bucket> buckets = range.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+
+                Range.Bucket bucket = buckets.get(0);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("*-50.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(1);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("50.0-150.0"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+
+                bucket = buckets.get(2);
+                assertThat(bucket, notNullValue());
+                assertThat(bucket.getKey(), equalTo("150.0-*"));
+                assertThat(bucket.getDocCount(), equalTo(0L));
+            }
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
index e90e73eec5bb3..7585ed42da830 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.aggregations.bucket;
 
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
@@ -34,7 +33,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
@@ -126,7 +125,7 @@ private void insertIdx1(List values1, List values2) throws Excep
             source.startObject().field("field2", value1).endObject();
         }
         source.endArray().endObject();
-        indexRandom(false, client().prepareIndex("idx1").setRouting("1").setSource(source));
+        indexRandom(false, prepareIndex("idx1").setRouting("1").setSource(source));
     }
 
     private void insertIdx2(String[][] values) throws Exception {
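ReverseNestedIT, which the rest of the diff converts the same way, exercises the nested/reverse_nested pair: nested steps from root documents into their nested children, and reverse_nested joins back out so that root-level fields can be aggregated per nested-level bucket. The aggregation tree under test, in isolation:

    import static org.elasticsearch.search.aggregations.AggregationBuilders.nested;
    import static org.elasticsearch.search.aggregations.AggregationBuilders.reverseNested;
    import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

    // For each value of the nested field "nested1.field2", count root-level "field1" values.
    nested("nested1", "nested1").subAggregation(
        terms("field2").field("nested1.field2").subAggregation(
            reverseNested("nested1_to_field1").subAggregation(
                terms("field1").field("field1")
            )
        )
    );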
terms("field1").field("field1").collectMode(randomFrom(SubAggCollectionMode.values())) + assertNoFailuresAndResponse( + prepareSearch("idx1").addAggregation( + nested("nested1", "nested1").subAggregation( + terms("field2").field("nested1.field2") + .subAggregation( + reverseNested("nested1_to_field1").subAggregation( + terms("field1").field("field1").collectMode(randomFrom(SubAggCollectionMode.values())) + ) ) - ) - ) - ).get(); - - assertNoFailures(response); - - Nested nested = response.getAggregations().get("nested1"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested1")); - assertThat(nested.getDocCount(), equalTo(25L)); - assertThat(nested.getAggregations().asList().isEmpty(), is(false)); - - Terms usernames = nested.getAggregations().get("field2"); - assertThat(usernames, notNullValue()); - assertThat(usernames.getBuckets().size(), equalTo(9)); - List usernameBuckets = new ArrayList<>(usernames.getBuckets()); - - // nested.field2: 1 - Terms.Bucket bucket = usernameBuckets.get(0); - assertThat(bucket.getKeyAsString(), equalTo("1")); - assertThat(bucket.getDocCount(), equalTo(6L)); - ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); - assertThat(((InternalAggregation) reverseNested).getProperty("_count"), equalTo(5L)); - Terms tags = reverseNested.getAggregations().get("field1"); - assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(tags)); - List tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(6)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(4L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(3L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(5).getKeyAsString(), equalTo("x")); - assertThat(tagsBuckets.get(5).getDocCount(), equalTo(1L)); - - // nested.field2: 4 - bucket = usernameBuckets.get(1); - assertThat(bucket.getKeyAsString(), equalTo("4")); - assertThat(bucket.getDocCount(), equalTo(4L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(5)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); - - // nested.field2: 7 - bucket = usernameBuckets.get(2); - assertThat(bucket.getKeyAsString(), equalTo("7")); - assertThat(bucket.getDocCount(), equalTo(3L)); - reverseNested = 
bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(5)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); - - // nested.field2: 2 - bucket = usernameBuckets.get(3); - assertThat(bucket.getKeyAsString(), equalTo("2")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(3)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - - // nested.field2: 3 - bucket = usernameBuckets.get(4); - assertThat(bucket.getKeyAsString(), equalTo("3")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(3)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - - // nested.field2: 5 - bucket = usernameBuckets.get(5); - assertThat(bucket.getKeyAsString(), equalTo("5")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - - // nested.field2: 6 - bucket = usernameBuckets.get(6); - assertThat(bucket.getKeyAsString(), equalTo("6")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new 
ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("y")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - - // nested.field2: 8 - bucket = usernameBuckets.get(7); - assertThat(bucket.getKeyAsString(), equalTo("8")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("x")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); - - // nested.field2: 9 - bucket = usernameBuckets.get(8); - assertThat(bucket.getKeyAsString(), equalTo("9")); - assertThat(bucket.getDocCount(), equalTo(2L)); - reverseNested = bucket.getAggregations().get("nested1_to_field1"); - tags = reverseNested.getAggregations().get("field1"); - tagsBuckets = new ArrayList<>(tags.getBuckets()); - assertThat(tagsBuckets.size(), equalTo(4)); - assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); - assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); - assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); - assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); - assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); - assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + ) + ), + response -> { + Nested nested = response.getAggregations().get("nested1"); + assertThat(nested, notNullValue()); + assertThat(nested.getName(), equalTo("nested1")); + assertThat(nested.getDocCount(), equalTo(25L)); + assertThat(nested.getAggregations().asList().isEmpty(), is(false)); + + Terms usernames = nested.getAggregations().get("field2"); + assertThat(usernames, notNullValue()); + assertThat(usernames.getBuckets().size(), equalTo(9)); + List usernameBuckets = new ArrayList<>(usernames.getBuckets()); + + // nested.field2: 1 + Terms.Bucket bucket = usernameBuckets.get(0); + assertThat(bucket.getKeyAsString(), equalTo("1")); + assertThat(bucket.getDocCount(), equalTo(6L)); + ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); + assertThat(((InternalAggregation) reverseNested).getProperty("_count"), equalTo(5L)); + Terms tags = reverseNested.getAggregations().get("field1"); + assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(tags)); + List tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(6)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + 
assertThat(tagsBuckets.get(0).getDocCount(), equalTo(4L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(3L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(5).getKeyAsString(), equalTo("x")); + assertThat(tagsBuckets.get(5).getDocCount(), equalTo(1L)); + + // nested.field2: 4 + bucket = usernameBuckets.get(1); + assertThat(bucket.getKeyAsString(), equalTo("4")); + assertThat(bucket.getDocCount(), equalTo(4L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(5)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); + + // nested.field2: 7 + bucket = usernameBuckets.get(2); + assertThat(bucket.getKeyAsString(), equalTo("7")); + assertThat(bucket.getDocCount(), equalTo(3L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(5)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L)); + + // nested.field2: 2 + bucket = usernameBuckets.get(3); + assertThat(bucket.getKeyAsString(), equalTo("2")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(3)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + + // nested.field2: 3 + bucket = usernameBuckets.get(4); + 
assertThat(bucket.getKeyAsString(), equalTo("3")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(3)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + + // nested.field2: 5 + bucket = usernameBuckets.get(5); + assertThat(bucket.getKeyAsString(), equalTo("5")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + + // nested.field2: 6 + bucket = usernameBuckets.get(6); + assertThat(bucket.getKeyAsString(), equalTo("6")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("y")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + + // nested.field2: 8 + bucket = usernameBuckets.get(7); + assertThat(bucket.getKeyAsString(), equalTo("8")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("x")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + + // nested.field2: 9 + bucket = usernameBuckets.get(8); + assertThat(bucket.getKeyAsString(), equalTo("9")); + assertThat(bucket.getDocCount(), equalTo(2L)); + reverseNested = 
bucket.getAggregations().get("nested1_to_field1"); + tags = reverseNested.getAggregations().get("field1"); + tagsBuckets = new ArrayList<>(tags.getBuckets()); + assertThat(tagsBuckets.size(), equalTo(4)); + assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); + assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d")); + assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e")); + assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L)); + assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z")); + assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L)); + } + ); } public void testSimpleNested1ToRootToNested2() throws Exception { - SearchResponse response = prepareSearch("idx2").addAggregation( - nested("nested1", "nested1").subAggregation( - reverseNested("nested1_to_root").subAggregation(nested("root_to_nested2", "nested1.nested2")) - ) - ).get(); - - assertNoFailures(response); - Nested nested = response.getAggregations().get("nested1"); - assertThat(nested.getName(), equalTo("nested1")); - assertThat(nested.getDocCount(), equalTo(9L)); - ReverseNested reverseNested = nested.getAggregations().get("nested1_to_root"); - assertThat(reverseNested.getName(), equalTo("nested1_to_root")); - assertThat(reverseNested.getDocCount(), equalTo(4L)); - nested = reverseNested.getAggregations().get("root_to_nested2"); - assertThat(nested.getName(), equalTo("root_to_nested2")); - assertThat(nested.getDocCount(), equalTo(27L)); + assertNoFailuresAndResponse( + prepareSearch("idx2").addAggregation( + nested("nested1", "nested1").subAggregation( + reverseNested("nested1_to_root").subAggregation(nested("root_to_nested2", "nested1.nested2")) + ) + ), + response -> { + Nested nested = response.getAggregations().get("nested1"); + assertThat(nested.getName(), equalTo("nested1")); + assertThat(nested.getDocCount(), equalTo(9L)); + ReverseNested reverseNested = nested.getAggregations().get("nested1_to_root"); + assertThat(reverseNested.getName(), equalTo("nested1_to_root")); + assertThat(reverseNested.getDocCount(), equalTo(4L)); + nested = reverseNested.getAggregations().get("root_to_nested2"); + assertThat(nested.getName(), equalTo("root_to_nested2")); + assertThat(nested.getDocCount(), equalTo(27L)); + } + ); } public void testSimpleReverseNestedToNested1() throws Exception { - SearchResponse response = prepareSearch("idx2").addAggregation( - nested("nested1", "nested1.nested2").subAggregation( - terms("field2").field("nested1.nested2.field2") - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .size(10000) - .subAggregation( - reverseNested("nested1_to_field1").path("nested1") - .subAggregation( - terms("field1").field("nested1.field1") - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ) - ) - ).get(); - - assertNoFailures(response); - - Nested nested = response.getAggregations().get("nested1"); - assertThat(nested, notNullValue()); - assertThat(nested.getName(), equalTo("nested1")); - assertThat(nested.getDocCount(), equalTo(27L)); - assertThat(nested.getAggregations().asList().isEmpty(), is(false)); - - Terms usernames = nested.getAggregations().get("field2"); - assertThat(usernames, notNullValue()); - assertThat(usernames.getBuckets().size(), equalTo(5)); - List usernameBuckets = new ArrayList<>(usernames.getBuckets()); - - Terms.Bucket bucket = usernameBuckets.get(0); - 
-        assertThat(bucket.getKeyAsString(), equalTo("0"));
-        assertThat(bucket.getDocCount(), equalTo(12L));
-        ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1");
-        assertThat(reverseNested.getDocCount(), equalTo(5L));
-        Terms tags = reverseNested.getAggregations().get("field1");
-        List<Terms.Bucket> tagsBuckets = new ArrayList<>(tags.getBuckets());
-        assertThat(tagsBuckets.size(), equalTo(2));
-        assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
-        assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L));
-        assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
-        assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L));
-
-        bucket = usernameBuckets.get(1);
-        assertThat(bucket.getKeyAsString(), equalTo("1"));
-        assertThat(bucket.getDocCount(), equalTo(6L));
-        reverseNested = bucket.getAggregations().get("nested1_to_field1");
-        assertThat(reverseNested.getDocCount(), equalTo(4L));
-        tags = reverseNested.getAggregations().get("field1");
-        tagsBuckets = new ArrayList<>(tags.getBuckets());
-        assertThat(tagsBuckets.size(), equalTo(4));
-        assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
-        assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
-        assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
-        assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
-        assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c"));
-        assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
-        assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e"));
-        assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
-
-        bucket = usernameBuckets.get(2);
-        assertThat(bucket.getKeyAsString(), equalTo("2"));
-        assertThat(bucket.getDocCount(), equalTo(5L));
-        reverseNested = bucket.getAggregations().get("nested1_to_field1");
-        assertThat(reverseNested.getDocCount(), equalTo(4L));
-        tags = reverseNested.getAggregations().get("field1");
-        tagsBuckets = new ArrayList<>(tags.getBuckets());
-        assertThat(tagsBuckets.size(), equalTo(4));
-        assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
-        assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
-        assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
-        assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
-        assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c"));
-        assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
-        assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e"));
-        assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
-
-        bucket = usernameBuckets.get(3);
-        assertThat(bucket.getKeyAsString(), equalTo("3"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-        reverseNested = bucket.getAggregations().get("nested1_to_field1");
-        assertThat(reverseNested.getDocCount(), equalTo(2L));
-        tags = reverseNested.getAggregations().get("field1");
-        tagsBuckets = new ArrayList<>(tags.getBuckets());
-        assertThat(tagsBuckets.size(), equalTo(2));
-        assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d"));
-        assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
-        assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f"));
-
-        bucket = usernameBuckets.get(4);
-        assertThat(bucket.getKeyAsString(), equalTo("4"));
-        assertThat(bucket.getDocCount(), equalTo(2L));
-        reverseNested = bucket.getAggregations().get("nested1_to_field1");
-        assertThat(reverseNested.getDocCount(), equalTo(2L));
-        tags = reverseNested.getAggregations().get("field1");
-        tagsBuckets = new ArrayList<>(tags.getBuckets());
-        assertThat(tagsBuckets.size(), equalTo(2));
-        assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d"));
-        assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
-        assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f"));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx2").addAggregation(
+                nested("nested1", "nested1.nested2").subAggregation(
+                    terms("field2").field("nested1.nested2.field2")
+                        .order(BucketOrder.key(true))
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .size(10000)
+                        .subAggregation(
+                            reverseNested("nested1_to_field1").path("nested1")
+                                .subAggregation(
+                                    terms("field1").field("nested1.field1")
+                                        .order(BucketOrder.key(true))
+                                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                                )
+                        )
+                )
+            ),
+            response -> {
+                Nested nested = response.getAggregations().get("nested1");
+                assertThat(nested, notNullValue());
+                assertThat(nested.getName(), equalTo("nested1"));
+                assertThat(nested.getDocCount(), equalTo(27L));
+                assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+                Terms usernames = nested.getAggregations().get("field2");
+                assertThat(usernames, notNullValue());
+                assertThat(usernames.getBuckets().size(), equalTo(5));
+                List<Terms.Bucket> usernameBuckets = new ArrayList<>(usernames.getBuckets());
+
+                Terms.Bucket bucket = usernameBuckets.get(0);
+                assertThat(bucket.getKeyAsString(), equalTo("0"));
+                assertThat(bucket.getDocCount(), equalTo(12L));
+                ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1");
+                assertThat(reverseNested.getDocCount(), equalTo(5L));
+                Terms tags = reverseNested.getAggregations().get("field1");
+                List<Terms.Bucket> tagsBuckets = new ArrayList<>(tags.getBuckets());
+                assertThat(tagsBuckets.size(), equalTo(2));
+                assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+                assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L));
+                assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+                assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L));
+
+                bucket = usernameBuckets.get(1);
+                assertThat(bucket.getKeyAsString(), equalTo("1"));
+                assertThat(bucket.getDocCount(), equalTo(6L));
+                reverseNested = bucket.getAggregations().get("nested1_to_field1");
+                assertThat(reverseNested.getDocCount(), equalTo(4L));
+                tags = reverseNested.getAggregations().get("field1");
+                tagsBuckets = new ArrayList<>(tags.getBuckets());
+                assertThat(tagsBuckets.size(), equalTo(4));
+                assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+                assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+                assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+                assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+                assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c"));
+                assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+                assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e"));
+                assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+
+                bucket = usernameBuckets.get(2);
+                assertThat(bucket.getKeyAsString(), equalTo("2"));
+                assertThat(bucket.getDocCount(), equalTo(5L));
+                reverseNested = bucket.getAggregations().get("nested1_to_field1");
+                assertThat(reverseNested.getDocCount(), equalTo(4L));
+                tags = reverseNested.getAggregations().get("field1");
+                tagsBuckets = new ArrayList<>(tags.getBuckets());
+                assertThat(tagsBuckets.size(), equalTo(4));
+                assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+                assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+                assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+                assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+                assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c"));
+                assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+                assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e"));
+                assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+
+                bucket = usernameBuckets.get(3);
+                assertThat(bucket.getKeyAsString(), equalTo("3"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+                reverseNested = bucket.getAggregations().get("nested1_to_field1");
+                assertThat(reverseNested.getDocCount(), equalTo(2L));
+                tags = reverseNested.getAggregations().get("field1");
+                tagsBuckets = new ArrayList<>(tags.getBuckets());
+                assertThat(tagsBuckets.size(), equalTo(2));
+                assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d"));
+                assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+                assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f"));
+
+                bucket = usernameBuckets.get(4);
+                assertThat(bucket.getKeyAsString(), equalTo("4"));
+                assertThat(bucket.getDocCount(), equalTo(2L));
+                reverseNested = bucket.getAggregations().get("nested1_to_field1");
+                assertThat(reverseNested.getDocCount(), equalTo(2L));
+                tags = reverseNested.getAggregations().get("field1");
+                tagsBuckets = new ArrayList<>(tags.getBuckets());
+                assertThat(tagsBuckets.size(), equalTo(2));
+                assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d"));
+                assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+                assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f"));
+            }
+        );
     }

     public void testReverseNestedAggWithoutNestedAgg() {
@@ -467,26 +470,32 @@ public void testReverseNestedAggWithoutNestedAgg() {
     }

     public void testNonExistingNestedField() throws Exception {
-        SearchResponse searchResponse = prepareSearch("idx2").setQuery(matchAllQuery())
-            .addAggregation(nested("nested2", "nested1.nested2").subAggregation(reverseNested("incorrect").path("nested3")))
-            .get();
+        assertNoFailuresAndResponse(
+            prepareSearch("idx2").setQuery(matchAllQuery())
+                .addAggregation(nested("nested2", "nested1.nested2").subAggregation(reverseNested("incorrect").path("nested3"))),
+            response -> {

-        Nested nested = searchResponse.getAggregations().get("nested2");
-        assertThat(nested, notNullValue());
-        assertThat(nested.getName(), equalTo("nested2"));
+                Nested nested = response.getAggregations().get("nested2");
+                assertThat(nested, notNullValue());
+                assertThat(nested.getName(), equalTo("nested2"));

-        ReverseNested reverseNested = nested.getAggregations().get("incorrect");
-        assertThat(reverseNested.getDocCount(), is(0L));
+                ReverseNested reverseNested = nested.getAggregations().get("incorrect");
+                assertThat(reverseNested.getDocCount(), is(0L));
+            }
+        );

         // Test that parsing the reverse_nested agg doesn't fail, because the parent nested agg is unmapped:
-        searchResponse = prepareSearch("idx1").setQuery(matchAllQuery())
-            .addAggregation(nested("incorrect1", "incorrect1").subAggregation(reverseNested("incorrect2").path("incorrect2")))
-            .get();
-
-        nested = searchResponse.getAggregations().get("incorrect1");
-        assertThat(nested, notNullValue());
-        assertThat(nested.getName(), equalTo("incorrect1"));
-        assertThat(nested.getDocCount(), is(0L));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx1").setQuery(matchAllQuery())
+                .addAggregation(nested("incorrect1", "incorrect1").subAggregation(reverseNested("incorrect2").path("incorrect2"))),
+            response -> {
+
+                Nested nested = response.getAggregations().get("incorrect1");
+                assertThat(nested, notNullValue());
+                assertThat(nested.getName(), equalTo("incorrect1"));
+                assertThat(nested.getDocCount(), is(0L));
+            }
+        );
     }

     public void testSameParentDocHavingMultipleBuckets() throws Exception {
@@ -526,8 +535,7 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception {
             .endObject();
         assertAcked(prepareCreate("idx3").setSettings(indexSettings(1, 0)).setMapping(mapping));

-        client().prepareIndex("idx3")
-            .setId("1")
+        prepareIndex("idx3").setId("1")
             .setRefreshPolicy(IMMEDIATE)
             .setSource(
                 jsonBuilder().startObject()
@@ -603,110 +611,117 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception {
             )
             .get();

-        SearchResponse response = prepareSearch("idx3").addAggregation(
-            nested("nested_0", "category").subAggregation(
-                terms("group_by_category").field("category.name")
-                    .subAggregation(
-                        reverseNested("to_root").subAggregation(
-                            nested("nested_1", "sku").subAggregation(
-                                filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation(
-                                    count("sku_count").field("sku.sku_type")
+        assertNoFailuresAndResponse(
+            prepareSearch("idx3").addAggregation(
+                nested("nested_0", "category").subAggregation(
+                    terms("group_by_category").field("category.name")
+                        .subAggregation(
+                            reverseNested("to_root").subAggregation(
+                                nested("nested_1", "sku").subAggregation(
+                                    filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation(
+                                        count("sku_count").field("sku.sku_type")
+                                    )
                                 )
                             )
                         )
-                    )
-            )
-        ).get();
-        assertNoFailures(response);
-        assertHitCount(response, 1);
-
-        Nested nested0 = response.getAggregations().get("nested_0");
-        assertThat(nested0.getDocCount(), equalTo(3L));
-        Terms terms = nested0.getAggregations().get("group_by_category");
-        assertThat(terms.getBuckets().size(), equalTo(3));
-        for (String bucketName : new String[] { "abc", "klm", "xyz" }) {
-            logger.info("Checking results for bucket {}", bucketName);
-            Terms.Bucket bucket = terms.getBucketByKey(bucketName);
-            assertThat(bucket.getDocCount(), equalTo(1L));
-            ReverseNested toRoot = bucket.getAggregations().get("to_root");
-            assertThat(toRoot.getDocCount(), equalTo(1L));
-            Nested nested1 = toRoot.getAggregations().get("nested_1");
-            assertThat(nested1.getDocCount(), equalTo(5L));
-            Filter filterByBar = nested1.getAggregations().get("filter_by_sku");
-            assertThat(filterByBar.getDocCount(), equalTo(3L));
-            ValueCount barCount = filterByBar.getAggregations().get("sku_count");
-            assertThat(barCount.getValue(), equalTo(3L));
-        }
+                )
+            ),
+            response -> {
+                assertHitCount(response, 1);
+
+                Nested nested0 = response.getAggregations().get("nested_0");
+                assertThat(nested0.getDocCount(), equalTo(3L));
+                Terms terms = nested0.getAggregations().get("group_by_category");
+                assertThat(terms.getBuckets().size(), equalTo(3));
+                for (String bucketName : new String[] { "abc", "klm", "xyz" }) {
+                    logger.info("Checking results for bucket {}", bucketName);
+                    Terms.Bucket bucket = terms.getBucketByKey(bucketName);
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                    ReverseNested toRoot = bucket.getAggregations().get("to_root");
+                    assertThat(toRoot.getDocCount(), equalTo(1L));
+                    Nested nested1 = toRoot.getAggregations().get("nested_1");
+                    assertThat(nested1.getDocCount(), equalTo(5L));
+                    Filter filterByBar = nested1.getAggregations().get("filter_by_sku");
+                    assertThat(filterByBar.getDocCount(), equalTo(3L));
+                    ValueCount barCount = filterByBar.getAggregations().get("sku_count");
+                    assertThat(barCount.getValue(), equalTo(3L));
+                }
+            }
+        );

-        response = prepareSearch("idx3").addAggregation(
-            nested("nested_0", "category").subAggregation(
-                terms("group_by_category").field("category.name")
-                    .subAggregation(
-                        reverseNested("to_root").subAggregation(
-                            nested("nested_1", "sku").subAggregation(
-                                filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation(
-                                    nested("nested_2", "sku.colors").subAggregation(
-                                        filter("filter_sku_color", termQuery("sku.colors.name", "red")).subAggregation(
-                                            reverseNested("reverse_to_sku").path("sku")
-                                                .subAggregation(count("sku_count").field("sku.sku_type"))
+        assertNoFailuresAndResponse(
+            prepareSearch("idx3").addAggregation(
+                nested("nested_0", "category").subAggregation(
+                    terms("group_by_category").field("category.name")
+                        .subAggregation(
+                            reverseNested("to_root").subAggregation(
+                                nested("nested_1", "sku").subAggregation(
+                                    filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation(
+                                        nested("nested_2", "sku.colors").subAggregation(
+                                            filter("filter_sku_color", termQuery("sku.colors.name", "red")).subAggregation(
+                                                reverseNested("reverse_to_sku").path("sku")
+                                                    .subAggregation(count("sku_count").field("sku.sku_type"))
+                                            )
                                         )
                                     )
                                 )
                             )
                         )
-                    )
-            )
-        ).get();
-        assertNoFailures(response);
-        assertHitCount(response, 1);
-
-        nested0 = response.getAggregations().get("nested_0");
-        assertThat(nested0.getDocCount(), equalTo(3L));
-        terms = nested0.getAggregations().get("group_by_category");
-        assertThat(terms.getBuckets().size(), equalTo(3));
-        for (String bucketName : new String[] { "abc", "klm", "xyz" }) {
-            logger.info("Checking results for bucket {}", bucketName);
-            Terms.Bucket bucket = terms.getBucketByKey(bucketName);
-            assertThat(bucket.getDocCount(), equalTo(1L));
-            ReverseNested toRoot = bucket.getAggregations().get("to_root");
-            assertThat(toRoot.getDocCount(), equalTo(1L));
-            Nested nested1 = toRoot.getAggregations().get("nested_1");
-            assertThat(nested1.getDocCount(), equalTo(5L));
-            Filter filterByBar = nested1.getAggregations().get("filter_by_sku");
-            assertThat(filterByBar.getDocCount(), equalTo(3L));
-            Nested nested2 = filterByBar.getAggregations().get("nested_2");
-            assertThat(nested2.getDocCount(), equalTo(8L));
-            Filter filterBarColor = nested2.getAggregations().get("filter_sku_color");
-            assertThat(filterBarColor.getDocCount(), equalTo(2L));
-            ReverseNested reverseToBar = filterBarColor.getAggregations().get("reverse_to_sku");
-            assertThat(reverseToBar.getDocCount(), equalTo(2L));
-            ValueCount barCount = reverseToBar.getAggregations().get("sku_count");
-            assertThat(barCount.getValue(), equalTo(2L));
-        }
+                )
+            ),
+            response -> {
+                assertHitCount(response, 1);
+
+                Nested nested0 = response.getAggregations().get("nested_0");
+                assertThat(nested0.getDocCount(), equalTo(3L));
+                Terms terms = nested0.getAggregations().get("group_by_category");
+                assertThat(terms.getBuckets().size(), equalTo(3));
+                for (String bucketName : new String[] { "abc", "klm", "xyz" }) {
+                    logger.info("Checking results for bucket {}", bucketName);
+                    Terms.Bucket bucket = terms.getBucketByKey(bucketName);
+                    assertThat(bucket.getDocCount(), equalTo(1L));
+                    ReverseNested toRoot = bucket.getAggregations().get("to_root");
+                    assertThat(toRoot.getDocCount(), equalTo(1L));
+                    Nested nested1 = toRoot.getAggregations().get("nested_1");
+                    assertThat(nested1.getDocCount(), equalTo(5L));
+                    Filter filterByBar = nested1.getAggregations().get("filter_by_sku");
+                    assertThat(filterByBar.getDocCount(), equalTo(3L));
+                    Nested nested2 = filterByBar.getAggregations().get("nested_2");
+                    assertThat(nested2.getDocCount(), equalTo(8L));
+                    Filter filterBarColor = nested2.getAggregations().get("filter_sku_color");
+                    assertThat(filterBarColor.getDocCount(), equalTo(2L));
+                    ReverseNested reverseToBar = filterBarColor.getAggregations().get("reverse_to_sku");
+                    assertThat(reverseToBar.getDocCount(), equalTo(2L));
+                    ValueCount barCount = reverseToBar.getAggregations().get("sku_count");
+                    assertThat(barCount.getValue(), equalTo(2L));
+                }
+            }
+        );
     }

     public void testFieldAlias() {
-        SearchResponse response = prepareSearch("idx1").addAggregation(
-            nested("nested1", "nested1").subAggregation(
-                terms("field2").field("nested1.field2")
-                    .subAggregation(
-                        reverseNested("nested1_to_field1").subAggregation(
-                            terms("field1").field("alias").collectMode(randomFrom(SubAggCollectionMode.values()))
+        assertNoFailuresAndResponse(
+            prepareSearch("idx1").addAggregation(
+                nested("nested1", "nested1").subAggregation(
+                    terms("field2").field("nested1.field2")
+                        .subAggregation(
+                            reverseNested("nested1_to_field1").subAggregation(
+                                terms("field1").field("alias").collectMode(randomFrom(SubAggCollectionMode.values()))
+                            )
                         )
-                )
-            )
-        ).get();
-
-        assertNoFailures(response);
-
-        Nested nested = response.getAggregations().get("nested1");
-        Terms nestedTerms = nested.getAggregations().get("field2");
-        Terms.Bucket bucket = nestedTerms.getBuckets().iterator().next();
-
-        ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1");
-        Terms reverseNestedTerms = reverseNested.getAggregations().get("field1");
-
-        assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(reverseNestedTerms));
-        assertThat(reverseNestedTerms.getBuckets().size(), equalTo(6));
+                )
+            ),
+            response -> {
+                Nested nested = response.getAggregations().get("nested1");
+                Terms nestedTerms = nested.getAggregations().get("field2");
+                Terms.Bucket bucket = nestedTerms.getBuckets().iterator().next();

+                ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1");
+                Terms reverseNestedTerms = reverseNested.getAggregations().get("field1");

+                assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(reverseNestedTerms));
+                assertThat(reverseNestedTerms.getBuckets().size(), equalTo(6));
+            }
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
index f6d7d37a29136..7f46856cdd594 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.aggregations.bucket;

 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.index.query.TermQueryBuilder;
 import org.elasticsearch.search.aggregations.BucketOrder;
@@ -27,6 +26,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -75,12 +75,10 @@ public void setupSuiteScopeCluster() throws Exception {

         for (int i = 0; i < data.length; i++) {
             String[] parts = data[i].split(",");
-            client().prepareIndex("test")
-                .setId("" + i)
+            prepareIndex("test").setId("" + i)
                 .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
                 .get();
-            client().prepareIndex("idx_unmapped_author")
-                .setId("" + i)
+            prepareIndex("idx_unmapped_author").setId("" + i)
                 .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
                 .get();
         }
@@ -91,96 +89,103 @@ public void testIssue10719() throws Exception {
         // Tests that we can refer to nested elements under a sample in a path
         // statement
         boolean asc = randomBoolean();
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .addAggregation(
-                terms("genres").field("genre")
-                    .order(BucketOrder.aggregation("sample>max_price.value", asc))
-                    .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price")))
-            )
-            .get();
-        assertNoFailures(response);
-        Terms genres = response.getAggregations().get("genres");
-        List<? extends Terms.Bucket> genreBuckets = genres.getBuckets();
-        // For this test to be useful we need >1 genre bucket to compare
-        assertThat(genreBuckets.size(), greaterThan(1));
-        double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE;
-        for (Terms.Bucket genreBucket : genres.getBuckets()) {
-            Sampler sample = genreBucket.getAggregations().get("sample");
-            Max maxPriceInGenre = sample.getAggregations().get("max_price");
-            double price = maxPriceInGenre.value();
-            if (asc) {
-                assertThat(price, greaterThanOrEqualTo(lastMaxPrice));
-            } else {
-                assertThat(price, lessThanOrEqualTo(lastMaxPrice));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .addAggregation(
+                    terms("genres").field("genre")
+                        .order(BucketOrder.aggregation("sample>max_price.value", asc))
+                        .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price")))
+                ),
+            response -> {
+                Terms genres = response.getAggregations().get("genres");
+                List<? extends Terms.Bucket> genreBuckets = genres.getBuckets();
+                // For this test to be useful we need >1 genre bucket to compare
+                assertThat(genreBuckets.size(), greaterThan(1));
+                double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE;
+                for (Terms.Bucket genreBucket : genres.getBuckets()) {
+                    Sampler sample = genreBucket.getAggregations().get("sample");
+                    Max maxPriceInGenre = sample.getAggregations().get("max_price");
+                    double price = maxPriceInGenre.value();
+                    if (asc) {
+                        assertThat(price, greaterThanOrEqualTo(lastMaxPrice));
+                    } else {
+                        assertThat(price, lessThanOrEqualTo(lastMaxPrice));
+                    }
+                    lastMaxPrice = price;
+                }
             }
-            lastMaxPrice = price;
-        }
-
+        );
     }

     public void testSimpleSampler() throws Exception {
         SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100);
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        Terms authors = sample.getAggregations().get("authors");
-        List<? extends Terms.Bucket> testBuckets = authors.getBuckets();
-
-        long maxBooksPerAuthor = 0;
-        for (Terms.Bucket testBucket : testBuckets) {
-            maxBooksPerAuthor = Math.max(testBucket.getDocCount(), maxBooksPerAuthor);
-        }
-        assertThat(maxBooksPerAuthor, equalTo(3L));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                Terms authors = sample.getAggregations().get("authors");
+                List<? extends Terms.Bucket> testBuckets = authors.getBuckets();
+
+                long maxBooksPerAuthor = 0;
+                for (Terms.Bucket testBucket : testBuckets) {
+                    maxBooksPerAuthor = Math.max(testBucket.getDocCount(), maxBooksPerAuthor);
+                }
+                assertThat(maxBooksPerAuthor, equalTo(3L));
+            }
+        );
     }

     public void testUnmappedChildAggNoDiversity() throws Exception {
         SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100);
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("idx_unmapped").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        assertThat(sample.getDocCount(), equalTo(0L));
-        Terms authors = sample.getAggregations().get("authors");
-        assertThat(authors.getBuckets().size(), equalTo(0));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                assertThat(sample.getDocCount(), equalTo(0L));
+                Terms authors = sample.getAggregations().get("authors");
+                assertThat(authors.getBuckets().size(), equalTo(0));
+            }
+        );
     }

     public void testPartiallyUnmappedChildAggNoDiversity() throws Exception {
         SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100);
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("idx_unmapped", "test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .setExplain(true)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
-        Sampler sample = response.getAggregations().get("sample");
-        assertThat(sample.getDocCount(), greaterThan(0L));
-        Terms authors = sample.getAggregations().get("authors");
-        assertThat(authors.getBuckets().size(), greaterThan(0));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_unmapped", "test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .setExplain(true)
+                .addAggregation(sampleAgg),
+            response -> {
+                Sampler sample = response.getAggregations().get("sample");
+                assertThat(sample.getDocCount(), greaterThan(0L));
+                Terms authors = sample.getAggregations().get("authors");
+                assertThat(authors.getBuckets().size(), greaterThan(0));
+            }
+        );
     }

     public void testRidiculousShardSizeSampler() throws Exception {
         SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(Integer.MAX_VALUE);
         sampleAgg.subAggregation(terms("authors").field("author"));
-        SearchResponse response = prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
-            .setQuery(new TermQueryBuilder("genre", "fantasy"))
-            .setFrom(0)
-            .setSize(60)
-            .addAggregation(sampleAgg)
-            .get();
-        assertNoFailures(response);
+        assertNoFailures(
+            prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                .setQuery(new TermQueryBuilder("genre", "fantasy"))
+                .setFrom(0)
+                .setSize(60)
+                .addAggregation(sampleAgg)
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java
index b0f9556bc842b..94db8926f59e7 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.aggregations.bucket;

 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.geometry.utils.Geohash;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
@@ -36,7 +35,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.range;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;

@@ -50,21 +49,20 @@ public class ShardReduceIT extends ESIntegTestCase {

     private IndexRequestBuilder indexDoc(String date, int value) throws Exception {
-        return client().prepareIndex("idx")
-            .setSource(
-                jsonBuilder().startObject()
-                    .field("value", value)
-                    .field("ip", "10.0.0." + value)
-                    .field("location", Geohash.stringEncode(5, 52, Geohash.PRECISION))
-                    .field("date", date)
-                    .field("term-l", 1)
-                    .field("term-d", 1.5)
-                    .field("term-s", "term")
-                    .startObject("nested")
-                    .field("date", date)
-                    .endObject()
-                    .endObject()
-            );
+        return prepareIndex("idx").setSource(
+            jsonBuilder().startObject()
+                .field("value", value)
+                .field("ip", "10.0.0." + value)
+                .field("location", Geohash.stringEncode(5, 52, Geohash.PRECISION))
+                .field("date", date)
+                .field("term-l", 1)
+                .field("term-d", 1.5)
+                .field("term-s", "term")
+                .startObject("nested")
+                .field("date", date)
+                .endObject()
+                .endObject()
+        );
     }

     @Override
@@ -87,246 +85,248 @@ public void setupSuiteScopeCluster() throws Exception {
     }

     public void testGlobal() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                global("global").subAggregation(
-                    dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)
-                )
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Global global = response.getAggregations().get("global");
-        Histogram histo = global.getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    global("global").subAggregation(
+                        dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)
+                    )
+                ),
+            response -> {
+                Global global = response.getAggregations().get("global");
+                Histogram histo = global.getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testFilter() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                filter("filter", QueryBuilders.matchAllQuery()).subAggregation(
-                    dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)
-                )
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Filter filter = response.getAggregations().get("filter");
-        Histogram histo = filter.getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    filter("filter", QueryBuilders.matchAllQuery()).subAggregation(
+                        dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)
+                    )
+                ),
+            response -> {
+                Filter filter = response.getAggregations().get("filter");
+                Histogram histo = filter.getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testMissing() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                missing("missing").field("foobar")
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Missing missing = response.getAggregations().get("missing");
-        Histogram histo = missing.getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    missing("missing").field("foobar")
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                Missing missing = response.getAggregations().get("missing");
+                Histogram histo = missing.getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testGlobalWithFilterWithMissing() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                global("global").subAggregation(
-                    filter("filter", QueryBuilders.matchAllQuery()).subAggregation(
-                        missing("missing").field("foobar")
-                            .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    global("global").subAggregation(
+                        filter("filter", QueryBuilders.matchAllQuery()).subAggregation(
+                            missing("missing").field("foobar")
+                                .subAggregation(
+                                    dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)
+                                )
+                        )
                     )
-                )
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Global global = response.getAggregations().get("global");
-        Filter filter = global.getAggregations().get("filter");
-        Missing missing = filter.getAggregations().get("missing");
-        Histogram histo = missing.getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+                ),
+            response -> {
+                Global global = response.getAggregations().get("global");
+                Filter filter = global.getAggregations().get("filter");
+                Missing missing = filter.getAggregations().get("missing");
+                Histogram histo = missing.getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testNested() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                nested("nested", "nested").subAggregation(
-                    dateHistogram("histo").field("nested.date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)
-                )
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Nested nested = response.getAggregations().get("nested");
-        Histogram histo = nested.getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    nested("nested", "nested").subAggregation(
+                        dateHistogram("histo").field("nested.date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)
+                    )
+                ),
+            response -> {
+                Nested nested = response.getAggregations().get("nested");
+                Histogram histo = nested.getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testStringTerms() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                terms("terms").field("term-s")
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Terms terms = response.getAggregations().get("terms");
-        Histogram histo = terms.getBucketByKey("term").getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    terms("terms").field("term-s")
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                Histogram histo = terms.getBucketByKey("term").getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testLongTerms() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                terms("terms").field("term-l")
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Terms terms = response.getAggregations().get("terms");
-        Histogram histo = terms.getBucketByKey("1").getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    terms("terms").field("term-l")
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                Histogram histo = terms.getBucketByKey("1").getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testDoubleTerms() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                terms("terms").field("term-d")
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Terms terms = response.getAggregations().get("terms");
-        Histogram histo = terms.getBucketByKey("1.5").getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    terms("terms").field("term-d")
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("terms");
+                Histogram histo = terms.getBucketByKey("1.5").getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testRange() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                range("range").field("value")
-                    .addRange("r1", 0, 10)
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    range("range").field("value")
+                        .addRange("r1", 0, 10)
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testDateRange() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                dateRange("range").field("date")
-                    .addRange("r1", "2014-01-01", "2014-01-10")
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    dateRange("range").field("date")
+                        .addRange("r1", "2014-01-01", "2014-01-10")
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testIpRange() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                ipRange("range").field("ip")
-                    .addRange("r1", "10.0.0.1", "10.0.0.10")
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Range range = response.getAggregations().get("range");
-        Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    ipRange("range").field("ip")
+                        .addRange("r1", "10.0.0.1", "10.0.0.10")
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                Range range = response.getAggregations().get("range");
+                Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testHistogram() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                histogram("topHisto").field("value")
-                    .interval(5)
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Histogram topHisto = response.getAggregations().get("topHisto");
-        Histogram histo = topHisto.getBuckets().get(0).getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
+                .addAggregation(
+                    histogram("topHisto").field("value")
+                        .interval(5)
+                        .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
+                ),
+            response -> {
+                Histogram topHisto = response.getAggregations().get("topHisto");
+                Histogram histo = topHisto.getBuckets().get(0).getAggregations().get("histo");
+                assertThat(histo.getBuckets().size(), equalTo(4));
+            }
+        );
     }

     public void testDateHistogram() throws Exception {
-        SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery())
-            .addAggregation(
-                dateHistogram("topHisto").field("date")
-                    .calendarInterval(DateHistogramInterval.MONTH)
-                    .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0))
-            )
-            .get();
-
-        assertNoFailures(response);
-
-        Histogram topHisto = response.getAggregations().get("topHisto");
-        Histogram histo = topHisto.getBuckets().iterator().next().getAggregations().get("histo");
-        assertThat(histo.getBuckets().size(), equalTo(4));
+
assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + dateHistogram("topHisto").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + Histogram topHisto = response.getAggregations().get("topHisto"); + Histogram histo = topHisto.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testGeoHashGrid() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - geohashGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - GeoGrid grid = response.getAggregations().get("grid"); - Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + geohashGrid("grid").field("location") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + GeoGrid grid = response.getAggregations().get("grid"); + Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } public void testGeoTileGrid() throws Exception { - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) - .addAggregation( - geotileGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) - ) - .get(); - - assertNoFailures(response); - - GeoGrid grid = response.getAggregations().get("grid"); - Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); - assertThat(histo.getBuckets().size(), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + geotileGrid("grid").field("location") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ), + response -> { + GeoGrid grid = response.getAggregations().get("grid"); + Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); + assertThat(histo.getBuckets().size(), equalTo(4)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java index 2c0c7766b646c..b8a1b3df8cf60 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.terms.Terms; @@ -18,6 +17,7 @@ import static 
org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.hamcrest.Matchers.equalTo;
 
 public class ShardSizeTermsIT extends ShardSizeTestCase {
@@ -26,22 +26,27 @@ public void testNoShardSizeString() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<String, Long> expected = new HashMap<>();
-        expected.put("1", 8L);
-        expected.put("3", 8L);
-        expected.put("2", 5L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<String, Long> expected = new HashMap<>();
+                expected.put("1", 8L);
+                expected.put("3", 8L);
+                expected.put("2", 5L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+                }
+            }
+        );
     }
 
     public void testShardSizeEqualsSizeString() throws Exception {
@@ -49,26 +54,28 @@ public void testShardSizeEqualsSizeString() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .shardSize(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<String, Long> expected = new HashMap<>();
-        expected.put("1", 8L);
-        expected.put("3", 8L);
-        expected.put("2", 4L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .shardSize(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<String, Long> expected = new HashMap<>();
+                expected.put("1", 8L);
+                expected.put("3", 8L);
+                expected.put("2", 4L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+                }
+            }
+        );
     }
 
     public void testWithShardSizeString() throws Exception {
@@ -77,26 +84,28 @@ public void testWithShardSizeString() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .shardSize(5)
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
-        Map<String, Long> expected = new HashMap<>();
-        expected.put("1", 8L);
-        expected.put("3", 8L);
-        expected.put("2", 5L); // <-- count is now fixed
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .shardSize(5)
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+                Map<String, Long> expected = new HashMap<>();
+                expected.put("1", 8L);
+                expected.put("3", 8L);
+                expected.put("2", 5L); // <-- count is now fixed
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+                }
+            }
+        );
     }
 
     public void testWithShardSizeStringSingleShard() throws Exception {
@@ -105,27 +114,29 @@ public void testWithShardSizeStringSingleShard() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setRouting(routing1)
-            .setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .shardSize(5)
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
-        Map<String, Long> expected = new HashMap<>();
-        expected.put("1", 5L);
-        expected.put("2", 4L);
-        expected.put("3", 3L); // <-- count is now fixed
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setRouting(routing1)
+                .setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .shardSize(5)
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+                Map<String, Long> expected = new HashMap<>();
+                expected.put("1", 5L);
+                expected.put("2", 4L);
+                expected.put("3", 3L); // <-- count is now fixed
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey())));
+                }
+            }
+        );
     }
 
     public void testNoShardSizeTermOrderString() throws Exception {
@@ -133,22 +144,24 @@ public void testNoShardSizeTermOrderString() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<String, Long> expected = new HashMap<>();
-        expected.put("1", 8L);
-        expected.put("2", 5L);
-        expected.put("3", 8L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<String, Long> expected = new HashMap<>();
+                expected.put("1", 8L);
+                expected.put("2", 5L);
+                expected.put("3", 8L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+                }
+            }
+        );
     }
 
     public void testNoShardSizeLong() throws Exception {
@@ -156,22 +169,27 @@ public void testNoShardSizeLong() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 8L);
-        expected.put(3, 8L);
-        expected.put(2, 5L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 8L);
+                expected.put(3, 8L);
+                expected.put(2, 5L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testShardSizeEqualsSizeLong() throws Exception {
@@ -179,26 +197,28 @@ public void testShardSizeEqualsSizeLong() throws Exception {
 
        indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .shardSize(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 8L);
-        expected.put(3, 8L);
-        expected.put(2, 4L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .shardSize(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 8L);
+                expected.put(3, 8L);
+                expected.put(2, 4L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testWithShardSizeLong() throws Exception {
@@ -206,26 +226,28 @@ public void testWithShardSizeLong() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .shardSize(5)
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 8L);
-        expected.put(3, 8L);
-        expected.put(2, 5L); // <-- count is now fixed
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .shardSize(5)
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 8L);
+                expected.put(3, 8L);
+                expected.put(2, 5L); // <-- count is now fixed
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testWithShardSizeLongSingleShard() throws Exception {
@@ -234,27 +256,29 @@ public void testWithShardSizeLongSingleShard() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setRouting(routing1)
-            .setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .shardSize(5)
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 5L);
-        expected.put(2, 4L);
-        expected.put(3, 3L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setRouting(routing1)
+                .setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .shardSize(5)
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 5L);
+                expected.put(2, 4L);
+                expected.put(3, 3L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testNoShardSizeTermOrderLong() throws Exception {
@@ -262,22 +286,24 @@ public void testNoShardSizeTermOrderLong() throws Exception {
 
        indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 8L);
-        expected.put(2, 5L);
-        expected.put(3, 8L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 8L);
+                expected.put(2, 5L);
+                expected.put(3, 8L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testNoShardSizeDouble() throws Exception {
@@ -285,22 +311,27 @@ public void testNoShardSizeDouble() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 8L);
-        expected.put(3, 8L);
-        expected.put(2, 5L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 8L);
+                expected.put(3, 8L);
+                expected.put(2, 5L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testShardSizeEqualsSizeDouble() throws Exception {
@@ -308,26 +339,28 @@ public void testShardSizeEqualsSizeDouble() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .shardSize(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 8L);
-        expected.put(3, 8L);
-        expected.put(2, 4L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .shardSize(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 8L);
+                expected.put(3, 8L);
+                expected.put(2, 4L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testWithShardSizeDouble() throws Exception {
@@ -335,26 +368,28 @@ public void testWithShardSizeDouble() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .shardSize(5)
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 8L);
-        expected.put(3, 8L);
-        expected.put(2, 5L); // <-- count is now fixed
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .shardSize(5)
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 8L);
+                expected.put(3, 8L);
+                expected.put(2, 5L); // <-- count is now fixed
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testWithShardSizeDoubleSingleShard() throws Exception {
@@ -362,27 +397,29 @@ public void testWithShardSizeDoubleSingleShard() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setRouting(routing1)
-            .setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key")
-                    .size(3)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-                    .shardSize(5)
-                    .order(BucketOrder.count(false))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 5L);
-        expected.put(2, 4L);
-        expected.put(3, 3L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setRouting(routing1)
+                .setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key")
+                        .size(3)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .shardSize(5)
+                        .order(BucketOrder.count(false))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 5L);
+                expected.put(2, 4L);
+                expected.put(3, 3L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 
     public void testNoShardSizeTermOrderDouble() throws Exception {
@@ -390,21 +427,23 @@ public void testNoShardSizeTermOrderDouble() throws Exception {
 
         indexData();
 
-        SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))
-            )
-            .get();
-
-        Terms terms = response.getAggregations().get("keys");
-        List<? extends Terms.Bucket> buckets = terms.getBuckets();
-        assertThat(buckets.size(), equalTo(3));
-        Map<Integer, Long> expected = new HashMap<>();
-        expected.put(1, 8L);
-        expected.put(2, 5L);
-        expected.put(3, 8L);
-        for (Terms.Bucket bucket : buckets) {
-            assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))
+                ),
+            response -> {
+                Terms terms = response.getAggregations().get("keys");
+                List<? extends Terms.Bucket> buckets = terms.getBuckets();
+                assertThat(buckets.size(), equalTo(3));
+                Map<Integer, Long> expected = new HashMap<>();
+                expected.put(1, 8L);
+                expected.put(2, 5L);
+                expected.put(3, 8L);
+                for (Terms.Bucket bucket : buckets) {
+                    assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+                }
+            }
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
index 4d94173f8d978..b672325891b50 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
@@ -9,7 +9,6 @@
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -57,7 +56,9 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.significantText;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertCheckedResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -130,71 +131,70 @@ public void testXContentResponse() throws Exception {
             );
         }
 
-        SearchResponse response = request.get();
-
-        assertNoFailures(response);
-        StringTerms classes = response.getAggregations().get("class");
-        assertThat(classes.getBuckets().size(), equalTo(2));
-        for (Terms.Bucket classBucket : classes.getBuckets()) {
-            Map<String, Aggregation> aggs = classBucket.getAggregations().asMap();
-            assertTrue(aggs.containsKey("sig_terms"));
-            SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms");
-            assertThat(agg.getBuckets().size(), equalTo(1));
-            String term = agg.iterator().next().getKeyAsString();
-            String classTerm = classBucket.getKeyAsString();
-            assertTrue(term.equals(classTerm));
-        }
+        assertCheckedResponse(request, response -> {
+            assertNoFailures(response);
+            StringTerms classes = response.getAggregations().get("class");
+            assertThat(classes.getBuckets().size(), equalTo(2));
+            for (Terms.Bucket classBucket : classes.getBuckets()) {
+                Map<String, Aggregation> aggs = classBucket.getAggregations().asMap();
+                assertTrue(aggs.containsKey("sig_terms"));
+                SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms");
+                assertThat(agg.getBuckets().size(), equalTo(1));
+                String term = agg.iterator().next().getKeyAsString();
+                String classTerm = classBucket.getKeyAsString();
+                assertTrue(term.equals(classTerm));
+            }
 
-        XContentBuilder responseBuilder = XContentFactory.jsonBuilder();
-        responseBuilder.startObject();
-        classes.toXContent(responseBuilder, ToXContent.EMPTY_PARAMS);
-        responseBuilder.endObject();
-
-        Object[] args = new Object[] { type.equals("long") ? "0" : "\"0\"", type.equals("long") ? "1" : "\"1\"" };
-        String result = Strings.format("""
-            {
-                "class": {
-                    "doc_count_error_upper_bound": 0,
-                    "sum_other_doc_count": 0,
-                    "buckets": [
-                        {
-                            "key": "0",
-                            "doc_count": 4,
-                            "sig_terms": {
-                                "doc_count": 4,
-                                "bg_count": 7,
-                                "buckets": [
-                                    {
-                                        "key": %s,
+            XContentBuilder responseBuilder = XContentFactory.jsonBuilder();
+            responseBuilder.startObject();
+            classes.toXContent(responseBuilder, ToXContent.EMPTY_PARAMS);
+            responseBuilder.endObject();
+
+            Object[] args = new Object[] { type.equals("long") ? "0" : "\"0\"", type.equals("long") ? "1" : "\"1\"" };
+            String result = Strings.format("""
+                {
+                    "class": {
+                        "doc_count_error_upper_bound": 0,
+                        "sum_other_doc_count": 0,
+                        "buckets": [
+                            {
+                                "key": "0",
+                                "doc_count": 4,
+                                "sig_terms": {
                                     "doc_count": 4,
-                                        "score": 0.39999999999999997,
-                                        "bg_count": 5
+                                    "bg_count": 7,
+                                    "buckets": [
+                                        {
+                                            "key": %s,
+                                            "doc_count": 4,
+                                            "score": 0.39999999999999997,
+                                            "bg_count": 5
+                                        }
+                                    ]
                                 }
-                                ]
-                            }
-                        },
-                        {
-                            "key": "1",
-                            "doc_count": 3,
-                            "sig_terms": {
-                                "doc_count": 3,
-                                "bg_count": 7,
-                                "buckets": [
-                                    {
-                                        "key":%s,
+                            },
+                            {
+                                "key": "1",
+                                "doc_count": 3,
+                                "sig_terms": {
                                     "doc_count": 3,
-                                        "score": 0.75,
-                                        "bg_count": 4
+                                    "bg_count": 7,
+                                    "buckets": [
+                                        {
+                                            "key":%s,
+                                            "doc_count": 3,
+                                            "score": 0.75,
+                                            "bg_count": 4
+                                        }
+                                    ]
                                 }
-                                ]
-                            }
+                            }
+                        ]
                     }
-                    ]
-                }
-            }
-            """, args);
-        assertThat(Strings.toString(responseBuilder), equalTo(XContentHelper.stripWhitespace(result)));
-
+                }
+                """, args);
+            assertThat(Strings.toString(responseBuilder), equalTo(XContentHelper.stripWhitespace(result)));
+        });
     }
 
     public void testPopularTermManyDeletedDocs() throws Exception {
@@ -208,10 +208,10 @@ public void testPopularTermManyDeletedDocs() throws Exception {
         String[] cat2v1 = { "constant", "two" };
         String[] cat2v2 = { "constant", "duo" };
         List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
-        indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, cat1v1, CLASS_FIELD, "1"));
-        indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("2").setSource(TEXT_FIELD, cat1v2, CLASS_FIELD, "1"));
-        indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("3").setSource(TEXT_FIELD, cat2v1, CLASS_FIELD, "2"));
-        indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("4").setSource(TEXT_FIELD, cat2v2, CLASS_FIELD, "2"));
+        indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, cat1v1, CLASS_FIELD, "1"));
+        indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("2").setSource(TEXT_FIELD, cat1v2, CLASS_FIELD, "1"));
+        indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("3").setSource(TEXT_FIELD, cat2v1, CLASS_FIELD, "2"));
+        indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("4").setSource(TEXT_FIELD, cat2v2, CLASS_FIELD, "2"));
         indexRandom(true, false, indexRequestBuilderList);
 
         // Now create some holes in the index with selective deletes caused by updates.
@@ -222,7 +222,7 @@ public void testPopularTermManyDeletedDocs() throws Exception {
         indexRequestBuilderList.clear();
         for (int i = 0; i < 50; i++) {
             text = text == cat1v2 ? cat1v1 : cat1v2;
-            indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, text, CLASS_FIELD, "1"));
+            indexRequestBuilderList.add(prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, text, CLASS_FIELD, "1"));
         }
         indexRandom(true, false, indexRequestBuilderList);
 
@@ -286,9 +286,6 @@ public void testBackgroundVsSeparateSet(
             );
         }
 
-        SearchResponse response1 = request1.get();
-        assertNoFailures(response1);
-
         SearchRequestBuilder request2;
         if (useSigText) {
             request2 = prepareSearch(INDEX_NAME).addAggregation(
@@ -324,32 +321,32 @@ public void testBackgroundVsSeparateSet(
             );
         }
 
-        SearchResponse response2 = request2.get();
-
-        StringTerms classes = response1.getAggregations().get("class");
+        assertNoFailuresAndResponse(request1, response1 -> assertNoFailuresAndResponse(request2, response2 -> {
+            StringTerms classes = response1.getAggregations().get("class");
 
-        SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms")));
-        assertThat(sigTerms0.getBuckets().size(), equalTo(2));
-        double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore();
-        double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore();
-        SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms")));
-        double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore();
-        double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore();
+            SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms")));
+            assertThat(sigTerms0.getBuckets().size(), equalTo(2));
+            double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore();
+            double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore();
+            SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms")));
+            double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore();
+            double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore();
 
-        Aggregations aggs = response2.getAggregations();
+            Aggregations aggs = response2.getAggregations();
 
-        sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms");
-        double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore();
-        double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore();
+            sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms");
+            double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore();
+            double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore();
 
-        sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get("1")).getAggregations().getAsMap().get("sig_terms");
-        double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore();
-        double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore();
+            sigTerms1 = (SignificantTerms) ((InternalFilter) aggs.get("1")).getAggregations().getAsMap().get("sig_terms");
+            double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore();
+            double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore();
 
-        assertThat(score00Background, equalTo(score00SeparateSets));
-        assertThat(score01Background, equalTo(score01SeparateSets));
-        assertThat(score10Background, equalTo(score10SeparateSets));
-        assertThat(score11Background, equalTo(score11SeparateSets));
+            assertThat(score00Background, equalTo(score00SeparateSets));
+            assertThat(score01Background, equalTo(score01SeparateSets));
+            assertThat(score10Background, equalTo(score10SeparateSets));
+            assertThat(score11Background, equalTo(score11SeparateSets));
+        }));
     }
 
     public void testScoresEqualForPositiveAndNegative() throws Exception {
@@ -385,25 +382,23 @@ public void testScoresEqualForPositiveAndNegative(SignificanceHeuristic heuristi
                 )
             );
         }
-        SearchResponse response = request.get();
-        assertNoFailures(response);
-
-        assertNoFailures(response);
-        StringTerms classes = response.getAggregations().get("class");
-        assertThat(classes.getBuckets().size(), equalTo(2));
-        Iterator<? extends Terms.Bucket> classBuckets = classes.getBuckets().iterator();
-
-        Aggregations aggregations = classBuckets.next().getAggregations();
-        SignificantTerms sigTerms = aggregations.get("mySignificantTerms");
-
-        List<? extends SignificantTerms.Bucket> classA = sigTerms.getBuckets();
-        Iterator<SignificantTerms.Bucket> classBBucketIterator = sigTerms.iterator();
-        assertThat(classA.size(), greaterThan(0));
-        for (SignificantTerms.Bucket classABucket : classA) {
-            SignificantTerms.Bucket classBBucket = classBBucketIterator.next();
-            assertThat(classABucket.getKey(), equalTo(classBBucket.getKey()));
-            assertThat(classABucket.getSignificanceScore(), closeTo(classBBucket.getSignificanceScore(), 1.e-5));
-        }
+        assertNoFailuresAndResponse(request, response -> {
+            StringTerms classes = response.getAggregations().get("class");
+            assertThat(classes.getBuckets().size(), equalTo(2));
+            Iterator<? extends Terms.Bucket> classBuckets = classes.getBuckets().iterator();
+
+            Aggregations aggregations = classBuckets.next().getAggregations();
+            SignificantTerms sigTerms = aggregations.get("mySignificantTerms");
+
+            List<? extends SignificantTerms.Bucket> classA = sigTerms.getBuckets();
+            Iterator<SignificantTerms.Bucket> classBBucketIterator = sigTerms.iterator();
+            assertThat(classA.size(), greaterThan(0));
+            for (SignificantTerms.Bucket classABucket : classA) {
+                SignificantTerms.Bucket classBBucket = classBBucketIterator.next();
+                assertThat(classABucket.getKey(), equalTo(classBBucket.getKey()));
+                assertThat(classABucket.getSignificanceScore(), closeTo(classBBucket.getSignificanceScore(), 1.e-5));
+            }
+        });
     }
 
     /**
@@ -423,16 +418,15 @@ public void testSubAggregations() throws Exception {
             .size(1000)
             .subAggregation(subAgg);
 
-        SearchResponse response = prepareSearch("test").setQuery(query).addAggregation(agg).get();
-        assertNoFailures(response);
-
-        SignificantTerms sigTerms = response.getAggregations().get("significant_terms");
-        assertThat(sigTerms.getBuckets().size(), equalTo(2));
+        assertNoFailuresAndResponse(prepareSearch("test").setQuery(query).addAggregation(agg), response -> {
+            SignificantTerms sigTerms = response.getAggregations().get("significant_terms");
+            assertThat(sigTerms.getBuckets().size(), equalTo(2));
 
-        for (SignificantTerms.Bucket bucket : sigTerms) {
-            StringTerms terms = bucket.getAggregations().get("class");
-            assertThat(terms.getBuckets().size(), equalTo(2));
-        }
+            for (SignificantTerms.Bucket bucket : sigTerms) {
+                StringTerms terms = bucket.getAggregations().get("class");
+                assertThat(terms.getBuckets().size(), equalTo(2));
+            }
+        });
     }
 
     private void indexEqualTestData() throws ExecutionException, InterruptedException {
@@ -463,7 +457,7 @@ private void indexEqualTestData() throws ExecutionException, InterruptedExceptio
         List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
         for (int i = 0; i < data.length; i++) {
             String[] parts = data[i].split("\t");
-            indexRequestBuilders.add(client().prepareIndex("test").setId("" + i).setSource("class", parts[0], "text", parts[1]));
+            indexRequestBuilders.add(prepareIndex("test").setId("" + i).setSource("class", parts[0], "text", parts[1]));
         }
         indexRandom(true, false, indexRequestBuilders);
     }
@@ -497,17 +491,17 @@ public void testScriptScore() throws ExecutionException, InterruptedException, I
                 )
             );
         }
-        SearchResponse response = request.get();
-        assertNoFailures(response);
-        for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) {
-            SignificantTerms sigTerms = classBucket.getAggregations().get("mySignificantTerms");
-            for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) {
-                assertThat(
-                    bucket.getSignificanceScore(),
-                    is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize())
-                );
+        assertNoFailuresAndResponse(request, response -> {
+            for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) {
+                SignificantTerms sigTerms = classBucket.getAggregations().get("mySignificantTerms");
+                for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) {
+                    assertThat(
+                        bucket.getSignificanceScore(),
+                        is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize())
+                    );
+                }
             }
-        }
+        });
     }
 
     private ScriptHeuristic getScriptSignificanceHeuristic() throws IOException {
@@ -539,7 +533,7 @@ private void indexRandomFrequencies01(String type) throws ExecutionException, In
                 text[0] = gb[randNum];
             }
             indexRequestBuilderList.add(
-                client().prepareIndex(INDEX_NAME).setSource(TEXT_FIELD, text, CLASS_FIELD, randomBoolean() ? "one" : "zero")
+                prepareIndex(INDEX_NAME).setSource(TEXT_FIELD, text, CLASS_FIELD, randomBoolean() ?
"one" : "zero") ); } indexRandom(true, indexRequestBuilderList); @@ -560,8 +554,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1, "t", "foo"), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2, "t", "bar") + prepareIndex("cache_test_idx").setId("1").setSource("s", 1, "t", "foo"), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2, "t", "bar") ); // Make sure we are starting with a clear cache @@ -579,17 +573,15 @@ public void testScriptCaching() throws Exception { new Script(ScriptType.INLINE, "mockscript", "Math.random()", Collections.emptyMap()) ); boolean useSigText = randomBoolean(); - SearchResponse r; + SearchRequestBuilder request; if (useSigText) { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)); } else { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -604,15 +596,13 @@ public void testScriptCaching() throws Exception { scriptHeuristic = getScriptSignificanceHeuristic(); useSigText = randomBoolean(); if (useSigText) { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)); } else { - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)) - .get(); + request = prepareSearch("cache_test_idx").setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -625,11 +615,11 @@ public void testScriptCaching() throws Exception { // Ensure that non-scripted requests are cached as normal if (useSigText) { - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantText("foo", "t")).get(); + request = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantText("foo", "t")); } else { - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantTerms("foo").field("s")).get(); + request = prepareSearch("cache_test_idx").setSize(0).addAggregation(significantTerms("foo").field("s")); } - assertNoFailures(r); + assertNoFailures(request); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 58609df7ae8fe..25d6dfb850bbc 
100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java
@@ -28,7 +28,7 @@
 import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.equalTo;
@@ -57,8 +57,7 @@ public void setupSuiteScopeCluster() throws Exception {
         int numUniqueTerms = between(2, numDocs / 2);
         for (int i = 0; i < numDocs; i++) {
             builders.add(
-                client().prepareIndex("idx")
-                    .setId("" + i)
+                prepareIndex("idx").setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms))
@@ -74,8 +73,7 @@ public void setupSuiteScopeCluster() throws Exception {
         );
         for (int i = 0; i < numDocs; i++) {
             builders.add(
-                client().prepareIndex("idx_single_shard")
-                    .setId("" + i)
+                prepareIndex("idx_single_shard").setId("" + i)
                     .setSource(
                         jsonBuilder().startObject()
                             .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms))
@@ -89,8 +87,7 @@ public void setupSuiteScopeCluster() throws Exception {
         assertAcked(prepareCreate("idx_with_routing").setMapping("{ \"_routing\" : { \"required\" : true } }"));
         for (int i = 0; i < numDocs; i++) {
             builders.add(
-                client().prepareIndex("idx_single_shard")
-                    .setId("" + i)
+                prepareIndex("idx_single_shard").setId("" + i)
                     .setRouting(String.valueOf(randomInt(numRoutingValues)))
                     .setSource(
                         jsonBuilder().startObject()
@@ -162,8 +159,7 @@ private void buildIndex(Map<String, Integer> docsPerTerm, String index, int shar
             for (int i = 0; i < entry.getValue(); i++) {
                 String term = entry.getKey();
                 builders.add(
-                    client().prepareIndex(index)
-                        .setId(term + "-" + i)
+                    prepareIndex(index).setId(term + "-" + i)
                         .setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, term).field("shard", shard).endObject())
                 );
             }
@@ -267,691 +263,643 @@ private void assertUnboundedDocCountError(int size, SearchResponse accurateRespo
 
     public void testStringValueField() throws Exception {
         int size = randomIntBetween(1, 20);
         int shardSize = randomIntBetween(size, size * 2);
-        SearchResponse accurateResponse = prepareSearch("idx").addAggregation(
-            terms("terms").executionHint(randomExecutionHint())
-                .field(STRING_FIELD_NAME)
-                .showTermDocCountError(true)
-                .size(10000)
-                .shardSize(10000)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(accurateResponse);
-
-        SearchResponse testResponse = prepareSearch("idx").addAggregation(
-            terms("terms").executionHint(randomExecutionHint())
-                .field(STRING_FIELD_NAME)
-                .showTermDocCountError(true)
-                .size(size)
-                .shardSize(shardSize)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(testResponse);
-
-        assertDocCountErrorWithinBounds(size, accurateResponse, testResponse);
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").addAggregation(
+                terms("terms").executionHint(randomExecutionHint())
+                    .field(STRING_FIELD_NAME)
+                    .showTermDocCountError(true)
+                    .size(10000)
+                    .shardSize(10000)
+                    .collectMode(randomFrom(SubAggCollectionMode.values()))
+            ),
+            accurateResponse -> assertNoFailuresAndResponse(
+                prepareSearch("idx").addAggregation(
+                    terms("terms").executionHint(randomExecutionHint())
+                        .field(STRING_FIELD_NAME)
+                        .showTermDocCountError(true)
+                        .size(size)
+                        .shardSize(shardSize)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                ),
+                testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse)
+            )
+        );
     }
 
     public void testStringValueFieldSingleShard() throws Exception {
         int size = randomIntBetween(1, 20);
         int shardSize = randomIntBetween(size, size * 2);
-        SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation(
-            terms("terms").executionHint(randomExecutionHint())
-                .field(STRING_FIELD_NAME)
-                .showTermDocCountError(true)
-                .size(10000)
-                .shardSize(10000)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(accurateResponse);
-
-        SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation(
-            terms("terms").executionHint(randomExecutionHint())
-                .field(STRING_FIELD_NAME)
-                .showTermDocCountError(true)
-                .size(size)
-                .shardSize(shardSize)
-                .collectMode(randomFrom(SubAggCollectionMode.values()))
-        ).get();
-
-        assertNoFailures(testResponse);
-
-        assertNoDocCountError(size, accurateResponse, testResponse);
-    }
-
-    public void testStringValueFieldWithRouting() throws Exception {
-        int size = randomIntBetween(1, 20);
-        int shardSize = randomIntBetween(size, size * 2);
-
-        SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues)))
-            .addAggregation(
+        assertNoFailuresAndResponse(
+            prepareSearch("idx_single_shard").addAggregation(
                 terms("terms").executionHint(randomExecutionHint())
                     .field(STRING_FIELD_NAME)
                     .showTermDocCountError(true)
-                    .size(size)
-                    .shardSize(shardSize)
+                    .size(10000)
+                    .shardSize(10000)
+
.collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); - } - - public void testStringValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(STRING_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testStringValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testStringValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - 
.collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - 
assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testStringValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - 
terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertDocCountErrorWithinBounds(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = 
prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); - } - - public void testLongValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(LONG_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testLongValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testLongValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + 
.field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + 
.order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testLongValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - 
.subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertDocCountErrorWithinBounds(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertDocCountErrorWithinBounds(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, 
testResponse); - } - - public void testDoubleValueFieldWithRouting() throws Exception { - int size = randomIntBetween(1, 20); - int shardSize = randomIntBetween(size, size * 2); - - SearchResponse testResponse = prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation( + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) + .size(10000) + .shardSize(10000) .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) ) - .get(); + ); + } - assertNoFailures(testResponse); + public void testDoubleValueFieldWithRouting() throws Exception { + int size = randomIntBetween(1, 20); + int shardSize = randomIntBetween(size, size * 2); - assertNoDocCountErrorSingleResponse(size, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_with_routing").setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountErrorSingleResponse(size, testResponse) + ); } public void testDoubleValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldTermSortAsc() throws 
Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(testResponse); - - assertNoDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + testResponse -> assertNoDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); 
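// NOTE (illustrative sketch, not part of the patch): every test in this file is migrated the same
// way — the old pair "SearchResponse r = request.get(); assertNoFailures(r);" becomes a single
// assertNoFailuresAndResponse(request, r -> ...) call, so the caller's assertions run while the
// response is still live. A plausible shape for such a helper is sketched below; the try/finally
// release via decRef() is an assumption about the test framework, not something this diff shows.
// (Consumer here is java.util.function.Consumer.)
static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
    SearchResponse response = request.get();   // execute the search
    try {
        assertNoFailures(response);            // fail fast on shard-level failures
        consumer.accept(response);             // run the caller's assertions
    } finally {
        response.decRef();                     // assumed: release the ref-counted response
    }
}
// Nested calls, as in the accurateResponse/testResponse tests throughout this file, simply run one
// wrapped search inside the other's consumer so both responses are live for the comparison.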
- SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } public void testDoubleValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); - SearchResponse accurateResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000) - .shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(accurateResponse); - - SearchResponse testResponse = prepareSearch("idx_single_shard").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) - ).get(); - - assertNoFailures(testResponse); - - assertUnboundedDocCountError(size, accurateResponse, testResponse); + assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + accurateResponse -> assertNoFailuresAndResponse( + prepareSearch("idx_single_shard").addAggregation( + 
terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ), + testResponse -> assertUnboundedDocCountError(size, accurateResponse, testResponse) + ) + ); } /** @@ -960,52 +908,54 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { * 3 one-shard indices. */ public void testFixedDocs() throws Exception { - SearchResponse response = prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(5) - .shardSize(5) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getDocCountError(), equalTo(46L)); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(5)); - - Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("A")); - assertThat(bucket.getDocCount(), equalTo(100L)); - assertThat(bucket.getDocCountError(), equalTo(0L)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("Z")); - assertThat(bucket.getDocCount(), equalTo(52L)); - assertThat(bucket.getDocCountError(), equalTo(2L)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("C")); - assertThat(bucket.getDocCount(), equalTo(50L)); - assertThat(bucket.getDocCountError(), equalTo(15L)); - - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("G")); - assertThat(bucket.getDocCount(), equalTo(45L)); - assertThat(bucket.getDocCountError(), equalTo(2L)); - - bucket = buckets.get(4); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("B")); - assertThat(bucket.getDocCount(), equalTo(43L)); - assertThat(bucket.getDocCountError(), equalTo(29L)); + assertNoFailuresAndResponse( + prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(5) + .shardSize(5) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getDocCountError(), equalTo(46L)); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(5)); + + Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("A")); + assertThat(bucket.getDocCount(), equalTo(100L)); + assertThat(bucket.getDocCountError(), equalTo(0L)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("Z")); + assertThat(bucket.getDocCount(), equalTo(52L)); + assertThat(bucket.getDocCountError(), equalTo(2L)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("C")); + assertThat(bucket.getDocCount(), equalTo(50L)); + assertThat(bucket.getDocCountError(), 
equalTo(15L)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("G")); + assertThat(bucket.getDocCount(), equalTo(45L)); + assertThat(bucket.getDocCountError(), equalTo(2L)); + + bucket = buckets.get(4); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("B")); + assertThat(bucket.getDocCount(), equalTo(43L)); + assertThat(bucket.getDocCountError(), equalTo(29L)); + } + ); } /** @@ -1013,16 +963,19 @@ public void testFixedDocs() throws Exception { * See https://github.com/elastic/elasticsearch/issues/40005 for more details */ public void testIncrementalReduction() { - SearchResponse response = prepareSearch("idx_fixed_docs_3", "idx_fixed_docs_4", "idx_fixed_docs_5").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(5) - .shardSize(5) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(response); - Terms terms = response.getAggregations().get("terms"); - assertThat(terms.getDocCountError(), equalTo(0L)); + assertNoFailuresAndResponse( + prepareSearch("idx_fixed_docs_3", "idx_fixed_docs_4", "idx_fixed_docs_5").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(5) + .shardSize(5) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms.getDocCountError(), equalTo(0L)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index ffb9539bee735..c8e23d65b4e37 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; @@ -26,6 +25,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.equalTo; public class TermsShardMinDocCountIT extends ESIntegTestCase { @@ -61,44 +61,51 @@ public void testShardMinDocCountSignificantTermsTest() throws Exception { indexRandom(true, false, indexBuilders); // first, check that indeed when not setting the shardMinDocCount parameter 0 terms are returned - SearchResponse response = prepareSearch(index).addAggregation( - (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation( - significantTerms("mySignificantTerms").field("text") - .minDocCount(2) - .size(2) - .shardSize(2) - .executionHint(randomExecutionHint()) - ) - ).get(); - assertNoFailures(response); - InternalFilter filteredBucket = 
response.getAggregations().get("inclass"); - SignificantTerms sigterms = filteredBucket.getAggregations().get("mySignificantTerms"); - assertThat(sigterms.getBuckets().size(), equalTo(0)); - - response = prepareSearch(index).addAggregation( - (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation( - significantTerms("mySignificantTerms").field("text") - .minDocCount(2) - .shardSize(2) - .shardMinDocCount(2) - .size(2) - .executionHint(randomExecutionHint()) - ) - ).get(); - assertNoFailures(response); - filteredBucket = response.getAggregations().get("inclass"); - sigterms = filteredBucket.getAggregations().get("mySignificantTerms"); - assertThat(sigterms.getBuckets().size(), equalTo(2)); + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation( + significantTerms("mySignificantTerms").field("text") + .minDocCount(2) + .size(2) + .shardSize(2) + .executionHint(randomExecutionHint()) + ) + ), + response -> { + InternalFilter filteredBucket = response.getAggregations().get("inclass"); + SignificantTerms sigterms = filteredBucket.getAggregations().get("mySignificantTerms"); + assertThat(sigterms.getBuckets().size(), equalTo(0)); + } + ); + + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation( + significantTerms("mySignificantTerms").field("text") + .minDocCount(2) + .shardSize(2) + .shardMinDocCount(2) + .size(2) + .executionHint(randomExecutionHint()) + ) + ), + response -> { + assertNoFailures(response); + InternalFilter filteredBucket = response.getAggregations().get("inclass"); + SignificantTerms sigterms = filteredBucket.getAggregations().get("mySignificantTerms"); + assertThat(sigterms.getBuckets().size(), equalTo(2)); + } + ); } private void addTermsDocs(String term, int numInClass, int numNotInClass, List builders) { String sourceClass = "{\"text\": \"" + term + "\", \"class\":" + "true" + "}"; String sourceNotClass = "{\"text\": \"" + term + "\", \"class\":" + "false" + "}"; for (int i = 0; i < numInClass; i++) { - builders.add(client().prepareIndex(index).setSource(sourceClass, XContentType.JSON)); + builders.add(prepareIndex(index).setSource(sourceClass, XContentType.JSON)); } for (int i = 0; i < numNotInClass; i++) { - builders.add(client().prepareIndex(index).setSource(sourceNotClass, XContentType.JSON)); + builders.add(prepareIndex(index).setSource(sourceNotClass, XContentType.JSON)); } } @@ -122,37 +129,42 @@ public void testShardMinDocCountTermsTest() throws Exception { indexRandom(true, false, indexBuilders); // first, check that indeed when not setting the shardMinDocCount parameter 0 terms are returned - SearchResponse response = prepareSearch(index).addAggregation( - terms("myTerms").field("text") - .minDocCount(2) - .size(2) - .shardSize(2) - .executionHint(randomExecutionHint()) - .order(BucketOrder.key(true)) - ).get(); - assertNoFailures(response); - Terms sigterms = response.getAggregations().get("myTerms"); - assertThat(sigterms.getBuckets().size(), equalTo(0)); - - response = prepareSearch(index).addAggregation( - terms("myTerms").field("text") - .minDocCount(2) - .shardMinDocCount(2) - .size(2) - .shardSize(2) - .executionHint(randomExecutionHint()) - .order(BucketOrder.key(true)) - ).get(); - assertNoFailures(response); - sigterms = response.getAggregations().get("myTerms"); - assertThat(sigterms.getBuckets().size(), equalTo(2)); - + assertNoFailuresAndResponse( + 
prepareSearch(index).addAggregation( + terms("myTerms").field("text") + .minDocCount(2) + .size(2) + .shardSize(2) + .executionHint(randomExecutionHint()) + .order(BucketOrder.key(true)) + ), + response -> { + Terms sigterms = response.getAggregations().get("myTerms"); + assertThat(sigterms.getBuckets().size(), equalTo(0)); + } + ); + + assertNoFailuresAndResponse( + prepareSearch(index).addAggregation( + terms("myTerms").field("text") + .minDocCount(2) + .shardMinDocCount(2) + .size(2) + .shardSize(2) + .executionHint(randomExecutionHint()) + .order(BucketOrder.key(true)) + ), + response -> { + Terms sigterms = response.getAggregations().get("myTerms"); + assertThat(sigterms.getBuckets().size(), equalTo(2)); + } + ); } private static void addTermsDocs(String term, int numDocs, List builders) { String sourceClass = "{\"text\": \"" + term + "\"}"; for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex(index).setSource(sourceClass, XContentType.JSON)); + builders.add(prepareIndex(index).setSource(sourceClass, XContentType.JSON)); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java index 02d16804198dd..2dccda385bf53 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java @@ -10,8 +10,6 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -19,6 +17,7 @@ import org.hamcrest.Matchers; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; /** * Test that index enough data to trigger the creation of Cuckoo filters. 
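// NOTE (orientation, not part of the patch): rare_terms buckets only terms whose overall document
// count is at or below maxDocCount, tracking already-seen terms approximately with cuckoo filters
// once enough data is indexed — which is what this test exercises. A minimal request using the
// same aggregation name and field as assertNumRareTerms below; maxDocCount(1) is an arbitrary
// example value:
client().prepareSearch(index)
    .addAggregation(
        new RareTermsAggregationBuilder("rareTerms")   // same name the test reads back
            .field("str_value.keyword")                // keyword field, as in the test
            .maxDocCount(1)                            // keep only terms seen in at most one doc
    )
    .get();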
@@ -56,11 +55,13 @@ public void testSingleValuedString() { } private void assertNumRareTerms(int maxDocs, int rareTerms) { - final SearchRequestBuilder requestBuilder = client().prepareSearch(index); - requestBuilder.addAggregation(new RareTermsAggregationBuilder("rareTerms").field("str_value.keyword").maxDocCount(maxDocs)); - final SearchResponse response = requestBuilder.get(); - assertNoFailures(response); - final RareTerms terms = response.getAggregations().get("rareTerms"); - assertThat(terms.getBuckets().size(), Matchers.equalTo(rareTerms)); + assertNoFailuresAndResponse( + client().prepareSearch(index) + .addAggregation(new RareTermsAggregationBuilder("rareTerms").field("str_value.keyword").maxDocCount(maxDocs)), + response -> { + final RareTerms terms = response.getAggregations().get("rareTerms"); + assertThat(terms.getBuckets().size(), Matchers.equalTo(rareTerms)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java index ceafd07c67d65..2277f4415d4db 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -59,6 +58,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -126,19 +126,18 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 5; i++) { builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, "val" + i) - .field("i", i) - .field("constant", 1) - .field("tag", i < 5 / 2 + 1 ? "more" : "less") - .startArray(MULTI_VALUED_FIELD_NAME) - .value("val" + i) - .value("val" + (i + 1)) - .endArray() - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, "val" + i) + .field("i", i) + .field("constant", 1) + .field("tag", i < 5 / 2 + 1 ? 
"more" : "less") + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + i) + .value("val" + (i + 1)) + .endArray() + .endObject() + ) ); } @@ -150,24 +149,22 @@ public void setupSuiteScopeCluster() throws Exception { ); for (int i = 0; i < 100; i++) { builders.add( - client().prepareIndex("high_card_idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0')) - .startArray(MULTI_VALUED_FIELD_NAME) - .value("val" + Strings.padStart(i + "", 3, '0')) - .value("val" + Strings.padStart((i + 1) + "", 3, '0')) - .endArray() - .endObject() - ) + prepareIndex("high_card_idx").setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0')) + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + Strings.padStart(i + "", 3, '0')) + .value("val" + Strings.padStart((i + 1) + "", 3, '0')) + .endArray() + .endObject() + ) ); } prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer").get(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) ); } @@ -227,45 +224,55 @@ private void getMultiSortDocs(List builders) throws IOExcep ); for (int i = 1; i <= 3; i++) { builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject() + ) ); } builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject()) + 
prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject() + ) ); builders.add( - client().prepareIndex("sort_idx") - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject()) + prepareIndex("sort_idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject() + ) ); } @@ -295,132 +302,148 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception { private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms - SearchResponse allResponse = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(allResponse); - StringTerms terms = allResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - int expectedCardinality = terms.getBuckets().size(); + int[] expectedCardinality = new int[1]; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + expectedCardinality[0] = terms.getBuckets().size(); + } + ); // Gather terms using partitioned aggregations final int numPartitions = randomIntBetween(2, 4); Set foundTerms = new HashSet<>(); for (int partition = 0; partition < numPartitions; partition++) { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").field(field) - .includeExclude(new IncludeExclude(partition, numPartitions)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - assertNoFailures(response); - terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertTrue(foundTerms.add(bucket.getKeyAsString())); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").field(field) + .includeExclude(new IncludeExclude(partition, numPartitions)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertTrue(foundTerms.add(bucket.getKeyAsString())); + } + } + ); } - 
assertEquals(expectedCardinality, foundTerms.size()); + assertEquals(expectedCardinality[0], foundTerms.size()); } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("foo_val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("foo_val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("foo_val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("foo_val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value.substring(0,3)", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(1)); - - StringTerms.Bucket bucket = terms.getBucketByKey("val"); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val")); - assertThat(bucket.getDocCount(), equalTo(5L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value.substring(0,3)", Collections.emptyMap())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(1)); + + StringTerms.Bucket bucket = terms.getBucketByKey("val"); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val")); + assertThat(bucket.getDocCount(), equalTo(5L)); + } + ); } public void 
testMultiValuedScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap()) - ) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - Terms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .script( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ) + ) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + Terms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("foo_val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("foo_val" + i)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("foo_val" + i); + 
assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("foo_val" + i)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } /* @@ -443,25 +466,26 @@ public void testScriptSingleValue() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script(script) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .script(script) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testScriptSingleValueExplicitSingleValue() throws Exception { @@ -472,108 +496,114 @@ public void testScriptSingleValueExplicitSingleValue() throws Exception { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script(script) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .script(script) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testScriptMultiValued() throws Exception { - SearchResponse response = prepareSearch("idx") - - .addAggregation( - new 
TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script( - new Script( - ScriptType.INLINE, - CustomScriptPlugin.NAME, - "doc['" + MULTI_VALUED_FIELD_NAME + "']", - Collections.emptyMap() + assertNoFailuresAndResponse( + prepareSearch("idx") + + .addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .script( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ) ) - ) - ) - .get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(6)); - - for (int i = 0; i < 6; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - if (i == 0 || i == 5) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } else { - assertThat(bucket.getDocCount(), equalTo(2L)); + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(6)); + + for (int i = 0; i < 6; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + if (i == 0 || i == 5) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } else { + assertThat(bucket.getDocCount(), equalTo(2L)); + } + } } - } + ); } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = prepareSearch("idx", "idx_unmapped").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx", "idx_unmapped").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + ); } public void testStringTermsNestedIntoPerBucketAggregator() throws Exception { // no execution hint so that the logic that decides whether or not to use ordinals is executed - SearchResponse response = 
prepareSearch("idx").addAggregation( - filter("filter", termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation( - new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ).get(); - - assertThat(response.getFailedShards(), equalTo(0)); - - Filter filter = response.getAggregations().get("filter"); - - StringTerms terms = filter.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); - - for (int i = 2; i <= 4; i++) { - StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(i == 3 ? 2L : 1L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + filter("filter", termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation( + new TermsAggregationBuilder("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ), + response -> { + assertThat(response.getFailedShards(), equalTo(0)); + + Filter filter = response.getAggregations().get("filter"); + + StringTerms terms = filter.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + for (int i = 2; i <= 4; i++) { + StringTerms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(i == 3 ? 2L : 1L)); + } + } + ); } public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { @@ -612,93 +642,95 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter", asc)) - .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) - ).get(); - - assertNoFailures(response); - - StringTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - Terms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - Filter filter = tag.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - filter = tag.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(asc ? 
3L : 2L)); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter", asc)) + .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) + ), + response -> { + StringTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + Terms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + Filter filter = tag.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(asc ? 2L : 3L)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + filter = tag.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(asc ? 3L : 2L)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>filter2>stats.max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(stats("stats").field("i")) + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>filter2>stats.max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(stats("stats").field("i")) + ) ) - ) - ).get(); - - assertNoFailures(response); - - StringTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - // the max for "more" is 2 - // the max for "less" is 4 - - StringTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter2 = filter1.getAggregations().get("filter2"); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); - Stats stats = filter2.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? 
"less" : "more")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); - filter2 = filter1.getAggregations().get("filter2"); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); - stats = filter2.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + ), + response -> { + StringTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + // the max for "more" is 2 + // the max for "less" is 4 + + StringTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter2 = filter1.getAggregations().get("filter2"); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); + Stats stats = filter2.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); + filter2 = filter1.getAggregations().get("filter2"); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); + stats = filter2.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 
4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsSpecialChars() throws Exception { @@ -709,57 +741,58 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS statsNameBuilder.append(randomAlphaOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", "")); String statsName = statsNameBuilder.toString(); boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + ) ) - ) - ).get(); - - assertNoFailures(response); - - StringTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - // the max for "more" is 2 - // the max for "less" is 4 - - StringTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter2 = filter1.getAggregations().get(filter2Name); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); - Stats stats = filter2.getAggregations().get(statsName); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); - filter2 = filter1.getAggregations().get(filter2Name); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); - stats = filter2.getAggregations().get(statsName); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + ), + response -> { + StringTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + // the max for "more" is 2 + // the max for "less" is 4 + + StringTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? 
"more" : "less")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter2 = filter1.getAggregations().get(filter2Name); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); + Stats stats = filter2.getAggregations().get(statsName); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); + filter2 = filter1.getAggregations().get(filter2Name); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); + stats = filter2.getAggregations().get(statsName); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsSpecialCharsNoDotNotation() throws Exception { @@ -770,57 +803,58 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS statsNameBuilder.append(randomAlphaOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", "")); String statsName = statsNameBuilder.toString(); boolean asc = randomBoolean(); - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + ) ) - ) - ).get(); - - assertNoFailures(response); - - StringTerms tags = response.getAggregations().get("tags"); - assertThat(tags, notNullValue()); - assertThat(tags.getName(), equalTo("tags")); - assertThat(tags.getBuckets().size(), equalTo(2)); - - Iterator iters = tags.getBuckets().iterator(); - - // the max for "more" is 2 - // the max for "less" is 4 - - StringTerms.Bucket tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); - assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); - Filter filter2 = filter1.getAggregations().get(filter2Name); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 
3L : 2L)); - Stats stats = filter2.getAggregations().get(statsName); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); - - tag = iters.next(); - assertThat(tag, notNullValue()); - assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); - assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); - filter1 = tag.getAggregations().get("filter1"); - assertThat(filter1, notNullValue()); - assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); - filter2 = filter1.getAggregations().get(filter2Name); - assertThat(filter2, notNullValue()); - assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); - stats = filter2.getAggregations().get(statsName); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + ), + response -> { + StringTerms tags = response.getAggregations().get("tags"); + assertThat(tags, notNullValue()); + assertThat(tags.getName(), equalTo("tags")); + assertThat(tags.getBuckets().size(), equalTo(2)); + + Iterator iters = tags.getBuckets().iterator(); + + // the max for "more" is 2 + // the max for "less" is 4 + + StringTerms.Bucket tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "more" : "less")); + assertThat(tag.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 3L : 2L)); + Filter filter2 = filter1.getAggregations().get(filter2Name); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 3L : 2L)); + Stats stats = filter2.getAggregations().get(statsName); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0)); + + tag = iters.next(); + assertThat(tag, notNullValue()); + assertThat(tag.getKeyAsString(), equalTo(asc ? "less" : "more")); + assertThat(tag.getDocCount(), equalTo(asc ? 2L : 3L)); + filter1 = tag.getAggregations().get("filter1"); + assertThat(filter1, notNullValue()); + assertThat(filter1.getDocCount(), equalTo(asc ? 2L : 3L)); + filter2 = filter1.getAggregations().get(filter2Name); + assertThat(filter2, notNullValue()); + assertThat(filter2.getDocCount(), equalTo(asc ? 2L : 3L)); + stats = filter2.getAggregations().get(statsName); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0)); + } + ); } public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Exception { @@ -868,7 +902,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMetric() throws Exception { for (String index : Arrays.asList("idx", "idx_unmapped")) { try { - SearchResponse response = prepareSearch(index).addAggregation( + prepareSearch(index).addAggregation( new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) @@ -877,9 +911,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMe ).get(); fail( "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " - + "with an unknown specified metric to order by. response had " - + response.getFailedShards() - + " failed shards." 
+ + "with an unknown specified metric to order by" ); } catch (ElasticsearchException e) { @@ -897,7 +929,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric .collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.aggregation("stats", true)) .subAggregation(stats("stats").field("i")) - ).execute().actionGet(); + ).get(); fail( "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " @@ -912,139 +944,140 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field("i")) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field("i")) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + i++; + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field("i")) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - int i = 4; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - Stats stats = 
bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - i--; - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field("i")) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 4; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + i--; + } + } + ); } public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) - .subAggregation(extendedStats("stats").field("i")) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - i++; - } - + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) + .subAggregation(extendedStats("stats").field("i")) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + i++; + } + } + ); } public void testSingleValuedFieldOrderedByStatsAggAscWithTermsSubAgg() throws Exception { boolean asc = true; - SearchResponse response = prepareSearch("idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) - 
.subAggregation(extendedStats("stats").field("i")) - .subAggregation( - new TermsAggregationBuilder("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(1L)); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getMax(), equalTo((double) i)); - - StringTerms subTermsAgg = bucket.getAggregations().get("subTerms"); - assertThat(subTermsAgg, notNullValue()); - assertThat(subTermsAgg.getBuckets().size(), equalTo(2)); - int j = i; - for (StringTerms.Bucket subBucket : subTermsAgg.getBuckets()) { - assertThat(subBucket, notNullValue()); - assertThat(subBucket.getKeyAsString(), equalTo("val" + j)); - assertThat(subBucket.getDocCount(), equalTo(1L)); - j++; + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) + .subAggregation(extendedStats("stats").field("i")) + .subAggregation( + new TermsAggregationBuilder("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMax(), equalTo((double) i)); + + StringTerms subTermsAgg = bucket.getAggregations().get("subTerms"); + assertThat(subTermsAgg, notNullValue()); + assertThat(subTermsAgg.getBuckets().size(), equalTo(2)); + int j = i; + for (StringTerms.Bucket subBucket : subTermsAgg.getBuckets()) { + assertThat(subBucket, notNullValue()); + assertThat(subBucket.getKeyAsString(), equalTo("val" + j)); + assertThat(subBucket.getDocCount(), equalTo(1L)); + j++; + } + i++; + } } - i++; - } - + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { @@ -1088,57 +1121,60 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(String[] expectedKeys, BucketOrder... 
order) { - SearchResponse response = prepareSearch("sort_idx").addAggregation( - new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d")) - ).get(); - - assertNoFailures(response); - - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(expectedKeys[i])); - assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); - Avg avg = bucket.getAggregations().get("avg_l"); - assertThat(avg, notNullValue()); - assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); - Sum sum = bucket.getAggregations().get("sum_d"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("sort_idx").addAggregation( + new TermsAggregationBuilder("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(expectedKeys[i])); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + ); } public void testIndexMetaField() throws Exception { - SearchResponse response = prepareSearch("idx", "empty_bucket_idx").addAggregation( - new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .field(IndexFieldMapper.NAME) - ).get(); - - assertNoFailures(response); - StringTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(2)); - - int i = 0; - for (StringTerms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(i == 0 ? "idx" : "empty_bucket_idx")); - assertThat(bucket.getDocCount(), equalTo(i == 0 ? 
5L : 2L)); - i++; - } + assertNoFailuresAndResponse( + prepareSearch("idx", "empty_bucket_idx").addAggregation( + new TermsAggregationBuilder("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .field(IndexFieldMapper.NAME) + ), + response -> { + StringTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(2)); + + int i = 0; + for (StringTerms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(i == 0 ? "idx" : "empty_bucket_idx")); + assertThat(bucket.getDocCount(), equalTo(i == 0 ? 5L : 2L)); + i++; + } + } + ); } public void testOtherDocCount() { @@ -1156,8 +1192,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", "foo"), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", "bar") + prepareIndex("cache_test_idx").setId("1").setSource("s", "foo"), + prepareIndex("cache_test_idx").setId("2").setSource("s", "bar") ); // Make sure we are starting with a clear cache @@ -1171,13 +1207,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1189,13 +1225,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - new TermsAggregationBuilder("terms").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + new TermsAggregationBuilder("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1207,8 +1243,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(new TermsAggregationBuilder("terms").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1230,13 +1265,12 @@ public void testScriptWithValueType() throws Exception { String source = builder.toString(); try (XContentParser parser = 
createParser(JsonXContent.jsonXContent, source)) { - SearchResponse response = prepareSearch("idx").setSource(new SearchSourceBuilder().parseXContent(parser, true)).get(); - - assertNoFailures(response); - LongTerms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(1)); + assertNoFailuresAndResponse(prepareSearch("idx").setSource(new SearchSourceBuilder().parseXContent(parser, true)), response -> { + LongTerms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(1)); + }); } String invalidValueType = source.replaceAll("\"value_type\":\"n.*\"", "\"value_type\":\"foobar\""); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index 44361587dd09e..8b1fa4abe09a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -37,8 +37,7 @@ public void testRequestBreaker() throws Exception { true, IntStream.range(0, randomIntBetween(10, 1000)) .mapToObj( - i -> client().prepareIndex("test") - .setId("id_" + i) + i -> prepareIndex("test").setId("id_" + i) .setSource(Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5))) ) .toArray(IndexRequestBuilder[]::new) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 64a97bf0f6f16..e15ad15bb4e3a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -39,6 +38,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -91,310 +91,325 @@ private static double varianceSampling(int... 
vals) { @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(extendedStats("stats").field("value")) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - ExtendedStats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getSumOfSquares(), equalTo(0.0)); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationPopulation()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationSampling()), is(true)); - assertThat(Double.isNaN(stats.getAvg()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(extendedStats("stats").field("value")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + ExtendedStats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getSumOfSquares(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationPopulation()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationSampling()), is(true)); + assertThat(Double.isNaN(stats.getAvg()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + } + ); } @Override public void testUnmapped() throws 
Exception { - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value")) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo(Double.NaN)); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getSumOfSquares(), equalTo(0.0)); - assertThat(stats.getVariance(), equalTo(Double.NaN)); - assertThat(stats.getVariancePopulation(), equalTo(Double.NaN)); - assertThat(stats.getVarianceSampling(), equalTo(Double.NaN)); - assertThat(stats.getStdDeviation(), equalTo(Double.NaN)); - assertThat(stats.getStdDeviationPopulation(), equalTo(Double.NaN)); - assertThat(stats.getStdDeviationSampling(), equalTo(Double.NaN)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").field("value")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo(Double.NaN)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSumOfSquares(), equalTo(0.0)); + assertThat(stats.getVariance(), equalTo(Double.NaN)); + assertThat(stats.getVariancePopulation(), equalTo(Double.NaN)); + assertThat(stats.getVarianceSampling(), equalTo(Double.NaN)); + assertThat(stats.getStdDeviation(), equalTo(Double.NaN)); + assertThat(stats.getStdDeviationPopulation(), equalTo(Double.NaN)); + assertThat(stats.getStdDeviationSampling(), equalTo(Double.NaN)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + } + ); } public void testPartiallyUnmapped() { double sigma = randomDouble() * 5; - ExtendedStats s1 = prepareSearch("idx").addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get() - .getAggregations() - .get("stats"); - ExtendedStats s2 = prepareSearch("idx", 
"idx_unmapped").addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get() - .getAggregations() - .get("stats"); - assertEquals(s1.getAvg(), s2.getAvg(), 1e-10); - assertEquals(s1.getCount(), s2.getCount()); - assertEquals(s1.getMin(), s2.getMin(), 0d); - assertEquals(s1.getMax(), s2.getMax(), 0d); - assertEquals(s1.getStdDeviation(), s2.getStdDeviation(), 1e-10); - assertEquals(s1.getStdDeviationPopulation(), s2.getStdDeviationPopulation(), 1e-10); - assertEquals(s1.getStdDeviationSampling(), s2.getStdDeviationSampling(), 1e-10); - assertEquals(s1.getSumOfSquares(), s2.getSumOfSquares(), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.LOWER), s2.getStdDeviationBound(Bounds.LOWER), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.UPPER), s2.getStdDeviationBound(Bounds.UPPER), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.LOWER_POPULATION), s2.getStdDeviationBound(Bounds.LOWER_POPULATION), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.UPPER_POPULATION), s2.getStdDeviationBound(Bounds.UPPER_POPULATION), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.LOWER_SAMPLING), s2.getStdDeviationBound(Bounds.LOWER_SAMPLING), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.UPPER_SAMPLING), s2.getStdDeviationBound(Bounds.UPPER_SAMPLING), 1e-10); + assertResponse(prepareSearch("idx").addAggregation(extendedStats("stats").field("value").sigma(sigma)), response1 -> { + ExtendedStats s1 = response1.getAggregations().get("stats"); + assertResponse( + prepareSearch("idx", "idx_unmapped").addAggregation(extendedStats("stats").field("value").sigma(sigma)), + response2 -> { + ExtendedStats s2 = response2.getAggregations().get("stats"); + assertEquals(s1.getAvg(), s2.getAvg(), 1e-10); + assertEquals(s1.getCount(), s2.getCount()); + assertEquals(s1.getMin(), s2.getMin(), 0d); + assertEquals(s1.getMax(), s2.getMax(), 0d); + assertEquals(s1.getStdDeviation(), s2.getStdDeviation(), 1e-10); + assertEquals(s1.getStdDeviationPopulation(), s2.getStdDeviationPopulation(), 1e-10); + assertEquals(s1.getStdDeviationSampling(), s2.getStdDeviationSampling(), 1e-10); + assertEquals(s1.getSumOfSquares(), s2.getSumOfSquares(), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.LOWER), s2.getStdDeviationBound(Bounds.LOWER), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.UPPER), s2.getStdDeviationBound(Bounds.UPPER), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.LOWER_POPULATION), s2.getStdDeviationBound(Bounds.LOWER_POPULATION), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.UPPER_POPULATION), s2.getStdDeviationBound(Bounds.UPPER_POPULATION), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.LOWER_SAMPLING), s2.getStdDeviationBound(Bounds.LOWER_SAMPLING), 1e-10); + assertEquals(s1.getStdDeviationBound(Bounds.UPPER_SAMPLING), s2.getStdDeviationBound(Bounds.UPPER_SAMPLING), 1e-10); + } + ); + }); } @Override public void testSingleValuedField() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), 
equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").field("value").sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + checkUpperLowerBounds(stats, sigma); + } + ); } public void testSingleValuedFieldDefaultSigma() throws Exception { // Same as previous test, but uses a default value for sigma - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - 
assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - checkUpperLowerBounds(stats, 2); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").field("value")), response -> { + + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + checkUpperLowerBounds(stats, 2); + }); } public void testSingleValuedField_WithFormatter() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").format("0000.0").field("value").sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getAvgAsString(), equalTo("0005.5")); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMinAsString(), equalTo("0001.0")); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getMaxAsString(), equalTo("0010.0")); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getSumAsString(), equalTo("0055.0")); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getSumOfSquaresAsString(), equalTo("0385.0")); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceAsString(), equalTo("0008.2")); - assertThat(stats.getVariancePopulationAsString(), equalTo("0008.2")); - assertThat(stats.getVarianceSamplingAsString(), equalTo("0009.2")); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - 
assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationAsString(), equalTo("0002.9")); - assertThat(stats.getStdDeviationPopulationAsString(), equalTo("0002.9")); - assertThat(stats.getStdDeviationSamplingAsString(), equalTo("0003.0")); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").format("0000.0").field("value").sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getAvgAsString(), equalTo("0005.5")); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMinAsString(), equalTo("0001.0")); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getMaxAsString(), equalTo("0010.0")); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getSumAsString(), equalTo("0055.0")); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getSumOfSquaresAsString(), equalTo("0385.0")); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceAsString(), equalTo("0008.2")); + assertThat(stats.getVariancePopulationAsString(), equalTo("0008.2")); + assertThat(stats.getVarianceSamplingAsString(), equalTo("0009.2")); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationAsString(), equalTo("0002.9")); + assertThat(stats.getStdDeviationPopulationAsString(), equalTo("0002.9")); + assertThat(stats.getStdDeviationSamplingAsString(), equalTo("0003.0")); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(extendedStats("stats").field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - ExtendedStats stats = global.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - ExtendedStats statsFromProperty = (ExtendedStats) ((InternalAggregation) global).getProperty("stats"); - assertThat(statsFromProperty, notNullValue()); - assertThat(statsFromProperty, sameInstance(stats)); - double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 
10; - assertThat(stats.getAvg(), equalTo(expectedAvgValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); - double expectedMinValue = 1.0; - assertThat(stats.getMin(), equalTo(expectedMinValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); - double expectedMaxValue = 10.0; - assertThat(stats.getMax(), equalTo(expectedMaxValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); - double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; - assertThat(stats.getSum(), equalTo(expectedSumValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); - long expectedCountValue = 10; - assertThat(stats.getCount(), equalTo(expectedCountValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); - double expectedSumOfSquaresValue = (double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100; - assertThat(stats.getSumOfSquares(), equalTo(expectedSumOfSquaresValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.sum_of_squares"), equalTo(expectedSumOfSquaresValue)); - double expectedVarianceValue = variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getVariance(), equalTo(expectedVarianceValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.variance"), equalTo(expectedVarianceValue)); - double expectedVariancePopulationValue = variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getVariancePopulation(), equalTo(expectedVariancePopulationValue)); - assertThat( - (double) ((InternalAggregation) global).getProperty("stats.variance_population"), - equalTo(expectedVariancePopulationValue) - ); - double expectedVarianceSamplingValue = varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getVarianceSampling(), equalTo(expectedVarianceSamplingValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.variance_sampling"), equalTo(expectedVarianceSamplingValue)); - double expectedStdDevValue = stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getStdDeviation(), equalTo(expectedStdDevValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.std_deviation"), equalTo(expectedStdDevValue)); - double expectedStdDevPopulationValue = stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getStdDeviationPopulation(), equalTo(expectedStdDevValue)); - assertThat( - (double) ((InternalAggregation) global).getProperty("stats.std_deviation_population"), - equalTo(expectedStdDevPopulationValue) - ); - double expectedStdDevSamplingValue = stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); - assertThat(stats.getStdDeviationSampling(), equalTo(expectedStdDevSamplingValue)); - assertThat( - (double) ((InternalAggregation) global).getProperty("stats.std_deviation_sampling"), - equalTo(expectedStdDevSamplingValue) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(extendedStats("stats").field("value"))), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + 
assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + ExtendedStats stats = global.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + ExtendedStats statsFromProperty = (ExtendedStats) ((InternalAggregation) global).getProperty("stats"); + assertThat(statsFromProperty, notNullValue()); + assertThat(statsFromProperty, sameInstance(stats)); + double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; + assertThat(stats.getAvg(), equalTo(expectedAvgValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); + double expectedMinValue = 1.0; + assertThat(stats.getMin(), equalTo(expectedMinValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); + double expectedMaxValue = 10.0; + assertThat(stats.getMax(), equalTo(expectedMaxValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); + double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; + assertThat(stats.getSum(), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); + long expectedCountValue = 10; + assertThat(stats.getCount(), equalTo(expectedCountValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); + double expectedSumOfSquaresValue = (double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100; + assertThat(stats.getSumOfSquares(), equalTo(expectedSumOfSquaresValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum_of_squares"), equalTo(expectedSumOfSquaresValue)); + double expectedVarianceValue = variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getVariance(), equalTo(expectedVarianceValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.variance"), equalTo(expectedVarianceValue)); + double expectedVariancePopulationValue = variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getVariancePopulation(), equalTo(expectedVariancePopulationValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.variance_population"), + equalTo(expectedVariancePopulationValue) + ); + double expectedVarianceSamplingValue = varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getVarianceSampling(), equalTo(expectedVarianceSamplingValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.variance_sampling"), + equalTo(expectedVarianceSamplingValue) + ); + double expectedStdDevValue = stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getStdDeviation(), equalTo(expectedStdDevValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.std_deviation"), equalTo(expectedStdDevValue)); + double expectedStdDevPopulationValue = stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getStdDeviationPopulation(), equalTo(expectedStdDevPopulationValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.std_deviation_population"), + equalTo(expectedStdDevPopulationValue) + ); + double expectedStdDevSamplingValue = stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + assertThat(stats.getStdDeviationSampling(), equalTo(expectedStdDevSamplingValue)); + assertThat( + (double) ((InternalAggregation)
global).getProperty("stats.std_deviation_sampling"), + equalTo(expectedStdDevSamplingValue) + ); + } ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").field("value").sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - .sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - 
ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + .sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override @@ -402,118 +417,139 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map<String, Object> params = new HashMap<>(); params.put("inc", 1); double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) - .sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); -
assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + .sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override public void testMultiValuedField() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("values").sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").field("values").sigma(sigma)), + response -> { + + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 
+ 8 + 9 + 10 + 11 + 12) / 20) + ); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(12.0)); + assertThat( + stats.getSum(), + equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) + ); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) + ); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + checkUpperLowerBounds(stats, sigma); + } ); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) - ); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat( - stats.getVariancePopulation(), - equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - assertThat( - stats.getStdDeviationSampling(), - equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - checkUpperLowerBounds(stats, sigma); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", Collections.emptyMap())) - .sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("values") + .script(new 
Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", Collections.emptyMap())) + .sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + ); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat( + stats.getSum(), + equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) + ); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + + checkUpperLowerBounds(stats, sigma); + } ); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) - ); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat( - stats.getVariancePopulation(), - equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) - ); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) - ); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - - checkUpperLowerBounds(stats, sigma); } @Override @@ -521,75 +557,88 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map<String, Object> params = new HashMap<>(); params.put("dec", 1); double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse,
10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + ); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat( + stats.getSum(), + equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) + ); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + checkUpperLowerBounds(stats, sigma); + } ); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) - ); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat( - stats.getVariancePopulation(), - equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) - ); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) - ); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - checkUpperLowerBounds(stats, sigma); } @Override public void testScriptSingleValued() throws Exception { double sigma = randomDouble() * 
randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) - ).sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) + ).sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override @@ -600,74 +649,83 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params); double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script(script).sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - 
ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); - assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); - checkUpperLowerBounds(stats, sigma); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").script(script).sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(11.0)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); + assertThat(stats.getCount(), equalTo(10L)); + assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + checkUpperLowerBounds(stats, sigma); + } + ); } @Override public void testScriptMultiValued() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats").script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) - ).sigma(sigma) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) - ); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 
+ 8 + 9 + 10 + 11 + 12)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) - ); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat( - stats.getVariancePopulation(), - equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) - ); - assertThat( - stats.getStdDeviationSampling(), - equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) + ).sigma(sigma) + ), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + ); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(12.0)); + assertThat( + stats.getSum(), + equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) + ); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) + ); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + checkUpperLowerBounds(stats, sigma); + } ); - checkUpperLowerBounds(stats, sigma); } @Override @@ -683,125 +741,147 @@ public void testScriptMultiValuedWithParams() throws Exception { ); double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").script(script).sigma(sigma)) - .get(); - - assertHitCount(searchResponse, 10); - - ExtendedStats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), 
equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9) / 20)); - assertThat(stats.getMin(), equalTo(0.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9)); - assertThat(stats.getCount(), equalTo(20L)); - assertThat( - stats.getSumOfSquares(), - equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 0 + 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81) - ); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - assertThat( - stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(extendedStats("stats").script(script).sigma(sigma)), + response -> { + assertHitCount(response, 10); + + ExtendedStats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9) / 20) + ); + assertThat(stats.getMin(), equalTo(0.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat( + stats.getSum(), + equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9) + ); + assertThat(stats.getCount(), equalTo(20L)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 0 + 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + assertThat( + stats.getVarianceSampling(), + equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + checkUpperLowerBounds(stats, sigma); + } ); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); - checkUpperLowerBounds(stats, sigma); } public void testEmptySubAggregation() { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("value").field("value") - .subAggregation(missing("values").field("values").subAggregation(extendedStats("stats").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("value"); - assertThat(terms, notNullValue()); - 
assertThat(terms.getBuckets().size(), equalTo(10)); - - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket.getDocCount(), equalTo(1L)); - - Missing missing = bucket.getAggregations().get("values"); - assertThat(missing, notNullValue()); - assertThat(missing.getDocCount(), equalTo(0L)); - - ExtendedStats stats = missing.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getSumOfSquares(), equalTo(0.0)); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationPopulation()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationSampling()), is(true)); - assertThat(Double.isNaN(stats.getAvg()), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("value").field("value") + .subAggregation(missing("values").field("values").subAggregation(extendedStats("stats").field("value"))) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("value"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), equalTo(10)); + + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket.getDocCount(), equalTo(1L)); + + Missing missing = bucket.getAggregations().get("values"); + assertThat(missing, notNullValue()); + assertThat(missing.getDocCount(), equalTo(0L)); + + ExtendedStats stats = missing.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getSumOfSquares(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationPopulation()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationSampling()), is(true)); + assertThat(Double.isNaN(stats.getAvg()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + } + } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = 
prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>extendedStats.avg", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - ExtendedStats extendedStats = filter.getAggregations().get("extendedStats"); - assertThat(extendedStats, notNullValue()); - assertThat(extendedStats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(extendedStats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(extendedStats.getAvg(), equalTo(Double.NaN)); - assertThat(extendedStats.getSum(), equalTo(0.0)); - assertThat(extendedStats.getCount(), equalTo(0L)); - assertThat(extendedStats.getStdDeviation(), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationPopulation(), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationSampling(), equalTo(Double.NaN)); - assertThat(extendedStats.getSumOfSquares(), equalTo(0.0)); - assertThat(extendedStats.getVariance(), equalTo(Double.NaN)); - assertThat(extendedStats.getVariancePopulation(), equalTo(Double.NaN)); - assertThat(extendedStats.getVarianceSampling(), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER_POPULATION), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER_POPULATION), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER_SAMPLING), equalTo(Double.NaN)); - assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER_SAMPLING), equalTo(Double.NaN)); - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>extendedStats.avg", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value")) + ) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + ExtendedStats extendedStats = filter.getAggregations().get("extendedStats"); + assertThat(extendedStats, notNullValue()); + 
assertThat(extendedStats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(extendedStats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(extendedStats.getAvg(), equalTo(Double.NaN)); + assertThat(extendedStats.getSum(), equalTo(0.0)); + assertThat(extendedStats.getCount(), equalTo(0L)); + assertThat(extendedStats.getStdDeviation(), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationPopulation(), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationSampling(), equalTo(Double.NaN)); + assertThat(extendedStats.getSumOfSquares(), equalTo(0.0)); + assertThat(extendedStats.getVariance(), equalTo(Double.NaN)); + assertThat(extendedStats.getVariancePopulation(), equalTo(Double.NaN)); + assertThat(extendedStats.getVarianceSampling(), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER_POPULATION), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER_POPULATION), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER_SAMPLING), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER_SAMPLING), equalTo(Double.NaN)); + } + } + ); } private void checkUpperLowerBounds(ExtendedStats stats, double sigma) { @@ -830,8 +910,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -845,13 +925,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - extendedStats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + extendedStats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -863,13 +943,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - extendedStats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + extendedStats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -881,8 +961,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = 
prepareSearch("cache_test_idx").setSize(0).addAggregation(extendedStats("foo").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(extendedStats("foo").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -893,5 +972,4 @@ public void testScriptCaching() throws Exception { equalTo(2L) ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java index 3aebbce43e1e1..f8b633dca1a10 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java @@ -8,13 +8,12 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.geo.RandomGeoGenerator; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; @@ -26,45 +25,42 @@ public class GeoBoundsIT extends SpatialBoundsAggregationTestBase { public void testSingleValuedFieldNearDateLine() { - SearchResponse response = prepareSearch(DATELINE_IDX_NAME).addAggregation( - boundsAgg(aggName(), SINGLE_VALUED_FIELD_NAME).wrapLongitude(false) - ).get(); - - assertNoFailures(response); - - GeoPoint geoValuesTopLeft = new GeoPoint(38, -179); - GeoPoint geoValuesBottomRight = new GeoPoint(-24, 178); - - GeoBounds geoBounds = response.getAggregations().get(aggName()); - assertThat(geoBounds, notNullValue()); - assertThat(geoBounds.getName(), equalTo(aggName())); - GeoPoint topLeft = geoBounds.topLeft(); - GeoPoint bottomRight = geoBounds.bottomRight(); - assertThat(topLeft.getY(), closeTo(geoValuesTopLeft.getY(), GEOHASH_TOLERANCE)); - assertThat(topLeft.getX(), closeTo(geoValuesTopLeft.getX(), GEOHASH_TOLERANCE)); - assertThat(bottomRight.getY(), closeTo(geoValuesBottomRight.getY(), GEOHASH_TOLERANCE)); - assertThat(bottomRight.getX(), closeTo(geoValuesBottomRight.getX(), GEOHASH_TOLERANCE)); + assertNoFailuresAndResponse( + prepareSearch(DATELINE_IDX_NAME).addAggregation(boundsAgg(aggName(), SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)), + response -> { + GeoPoint geoValuesTopLeft = new GeoPoint(38, -179); + GeoPoint geoValuesBottomRight = new GeoPoint(-24, 178); + + GeoBounds geoBounds = response.getAggregations().get(aggName()); + assertThat(geoBounds, notNullValue()); + assertThat(geoBounds.getName(), equalTo(aggName())); + GeoPoint topLeft = geoBounds.topLeft(); + GeoPoint bottomRight = geoBounds.bottomRight(); + assertThat(topLeft.getY(), closeTo(geoValuesTopLeft.getY(), GEOHASH_TOLERANCE)); + assertThat(topLeft.getX(), closeTo(geoValuesTopLeft.getX(), GEOHASH_TOLERANCE)); + assertThat(bottomRight.getY(), closeTo(geoValuesBottomRight.getY(), GEOHASH_TOLERANCE)); + assertThat(bottomRight.getX(), closeTo(geoValuesBottomRight.getX(), GEOHASH_TOLERANCE)); + } + ); } public void 
testSingleValuedFieldNearDateLineWrapLongitude() { - GeoPoint geoValuesTopLeft = new GeoPoint(38, 170); GeoPoint geoValuesBottomRight = new GeoPoint(-24, -175); - SearchResponse response = prepareSearch(DATELINE_IDX_NAME).addAggregation( - boundsAgg(aggName(), SINGLE_VALUED_FIELD_NAME).wrapLongitude(true) - ).get(); - - assertNoFailures(response); - - GeoBounds geoBounds = response.getAggregations().get(aggName()); - assertThat(geoBounds, notNullValue()); - assertThat(geoBounds.getName(), equalTo(aggName())); - GeoPoint topLeft = geoBounds.topLeft(); - GeoPoint bottomRight = geoBounds.bottomRight(); - assertThat(topLeft.getY(), closeTo(geoValuesTopLeft.getY(), GEOHASH_TOLERANCE)); - assertThat(topLeft.getX(), closeTo(geoValuesTopLeft.getX(), GEOHASH_TOLERANCE)); - assertThat(bottomRight.getY(), closeTo(geoValuesBottomRight.getY(), GEOHASH_TOLERANCE)); - assertThat(bottomRight.getX(), closeTo(geoValuesBottomRight.getX(), GEOHASH_TOLERANCE)); + assertNoFailuresAndResponse( + prepareSearch(DATELINE_IDX_NAME).addAggregation(boundsAgg(aggName(), SINGLE_VALUED_FIELD_NAME).wrapLongitude(true)), + response -> { + GeoBounds geoBounds = response.getAggregations().get(aggName()); + assertThat(geoBounds, notNullValue()); + assertThat(geoBounds.getName(), equalTo(aggName())); + GeoPoint topLeft = geoBounds.topLeft(); + GeoPoint bottomRight = geoBounds.bottomRight(); + assertThat(topLeft.getY(), closeTo(geoValuesTopLeft.getY(), GEOHASH_TOLERANCE)); + assertThat(topLeft.getX(), closeTo(geoValuesTopLeft.getX(), GEOHASH_TOLERANCE)); + assertThat(bottomRight.getY(), closeTo(geoValuesBottomRight.getY(), GEOHASH_TOLERANCE)); + assertThat(bottomRight.getX(), closeTo(geoValuesBottomRight.getX(), GEOHASH_TOLERANCE)); + } + ); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java index 4b12cddde691f..a7d32863718e3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGrid; @@ -18,7 +17,7 @@ import java.util.List; import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -29,21 +28,24 @@ public class GeoCentroidIT extends CentroidAggregationTestBase { public void testSingleValueFieldAsSubAggToGeohashGrid() { - SearchResponse response = prepareSearch(HIGH_CARD_IDX_NAME).addAggregation( - geohashGrid("geoGrid").field(SINGLE_VALUED_FIELD_NAME).subAggregation(centroidAgg(aggName()).field(SINGLE_VALUED_FIELD_NAME)) - ).get(); - assertNoFailures(response); - - GeoGrid grid = response.getAggregations().get("geoGrid"); - assertThat(grid, notNullValue()); - assertThat(grid.getName(), equalTo("geoGrid")); - List buckets = grid.getBuckets(); - for (GeoGrid.Bucket cell : buckets) { - String geohash = 
cell.getKeyAsString(); - SpatialPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); - GeoCentroid centroidAgg = cell.getAggregations().get(aggName()); - assertSameCentroid(centroidAgg.centroid(), expectedCentroid); - } + assertNoFailuresAndResponse( + prepareSearch(HIGH_CARD_IDX_NAME).addAggregation( + geohashGrid("geoGrid").field(SINGLE_VALUED_FIELD_NAME) + .subAggregation(centroidAgg(aggName()).field(SINGLE_VALUED_FIELD_NAME)) + ), + response -> { + GeoGrid grid = response.getAggregations().get("geoGrid"); + assertThat(grid, notNullValue()); + assertThat(grid.getName(), equalTo("geoGrid")); + List buckets = grid.getBuckets(); + for (GeoGrid.Bucket cell : buckets) { + String geohash = cell.getKeyAsString(); + SpatialPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); + GeoCentroid centroidAgg = cell.getAggregations().get(aggName()); + assertSameCentroid(centroidAgg.centroid(), expectedCentroid); + } + } + ); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 7d5e446d591bb..d263c14fe4710 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -40,6 +39,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -99,70 +99,76 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minVal @Override public void testEmptyAggregation() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation( - percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value") - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - ) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - PercentileRanks reversePercentiles = bucket.getAggregations().get("percentile_ranks"); - assertThat(reversePercentiles, notNullValue()); - assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); - assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + assertResponse( + 
prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + PercentileRanks reversePercentiles = bucket.getAggregations().get("percentile_ranks"); + assertThat(reversePercentiles, notNullValue()); + assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); + assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + } + ); } @Override public void testUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 }).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - PercentileRanks reversePercentiles = searchResponse.getAggregations().get("percentile_ranks"); - assertThat(reversePercentiles, notNullValue()); - assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); - assertThat(reversePercentiles.percent(0), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(100), equalTo(Double.NaN)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 }).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + PercentileRanks reversePercentiles = response.getAggregations().get("percentile_ranks"); + assertThat(reversePercentiles, notNullValue()); + assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); + assertThat(reversePercentiles.percent(0), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(100), equalTo(Double.NaN)); + } + ); } @Override public void testSingleValuedField() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + 
.numberOfSignificantValueDigits(sigDigits) + .field("value") + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue, maxValue, sigDigits); + } + ); } public void testNullValuesField() throws Exception { @@ -201,84 +207,91 @@ public void testEmptyValuesField() throws Exception { public void testSingleValuedFieldGetProperty() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - PercentileRanks values = global.getAggregations().get("percentile_ranks"); - assertThat(values, notNullValue()); - assertThat(values.getName(), equalTo("percentile_ranks")); - assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); - + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + ), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + PercentileRanks values = global.getAggregations().get("percentile_ranks"); + assertThat(values, notNullValue()); + assertThat(values.getName(), equalTo("percentile_ranks")); + assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); + } + ); } public void testSingleValuedFieldOutsideRange() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = new double[] { minValue - 1, maxValue + 1 }; - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue, maxValue, sigDigits); + } + ); } @Override public void 
testSingleValuedFieldPartiallyUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue, maxValue, sigDigits); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override @@ -287,74 +300,82 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ), + response -> { + assertHitCount(response, 10); + + final 
PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override public void testMultiValuedField() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValues, maxValues); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues, maxValues, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues, maxValues, sigDigits); + } + ); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + } + ); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(20 - maxValues, 20 - minValues); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, 20 - maxValues, 20 - minValues, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, 
AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, 20 - maxValues, 20 - minValues, sigDigits); + } + ); } @Override @@ -363,37 +384,41 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + } + ); } @Override public void testScriptSingleValued() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue, maxValue, sigDigits); + } + ); } @Override @@ -405,18 +430,20 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - 
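// Sketch (assumed from the call sites, not taken from this change): the
// assertConsistent(pcts, values, min, max, sigDigits) helper defined earlier
// in this file is expected to verify roughly the following:
//
//     for (double v : pcts) {
//         double percent = values.percent(v);
//         assertThat(percent, greaterThanOrEqualTo(0.0));   // a rank is a percentage...
//         assertThat(percent, lessThanOrEqualTo(100.0));    // ...in [0, 100]
//     }
//     // plus monotonicity: for v1 <= v2, values.percent(v1) <= values.percent(v2),
//     // up to the precision implied by sigDigits.
//
// The exact checks live in the helper near the top of the file.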
assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override @@ -426,18 +453,20 @@ public void testScriptMultiValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script) - ) - .get(); - - assertHitCount(searchResponse, 10); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ), + response -> { + assertHitCount(response, 10); - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues, maxValues, sigDigits); + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues, maxValues, sigDigits); + } + ); } @Override @@ -446,87 +475,93 @@ public void testScriptMultiValuedWithParams() throws Exception { Script script = AggregationTestScriptsPlugin.DECREMENT_ALL_VALUES; final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1, maxValues - 1, sigDigits); + } + ); } public void testOrderBySubAggregation() { int sigDigits = randomSignificantDigits(); boolean asc = randomBoolean(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(2L) - .subAggregation( - percentileRanks("percentile_ranks", new double[] { 99 }).field("value") - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - ) - .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Histogram histo = searchResponse.getAggregations().get("histo"); - double previous = asc ? 
Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - for (Histogram.Bucket bucket : histo.getBuckets()) { - PercentileRanks values = bucket.getAggregations().get("percentile_ranks"); - double p99 = values.percent(99); - if (asc) { - assertThat(p99, greaterThanOrEqualTo(previous)); - } else { - assertThat(p99, lessThanOrEqualTo(previous)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation( + percentileRanks("percentile_ranks", new double[] { 99 }).field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + ) + .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) + ), + response -> { + assertHitCount(response, 10); + + Histogram histo = response.getAggregations().get("histo"); + double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + for (Histogram.Bucket bucket : histo.getBuckets()) { + PercentileRanks values = bucket.getAggregations().get("percentile_ranks"); + double p99 = values.percent(99); + if (asc) { + assertThat(p99, greaterThanOrEqualTo(previous)); + } else { + assertThat(p99, lessThanOrEqualTo(previous)); + } + previous = p99; + } } - previous = p99; - } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation( - percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.HDR).field("value") + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.HDR).field("value") + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List<? extends Terms.Bucket> buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - PercentileRanks ranks = filter.getAggregations().get("ranks"); - assertThat(ranks, notNullValue()); - assertThat(ranks.percent(99), equalTo(Double.NaN)); - - } + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + 
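// Each filter bucket matches no documents (no "value" equals 100), so the HDR
// state under it stays empty and percent(99) is NaN in every bucket; with all
// sort keys equal, the terms fall back to key order, which is why the loop can
// expect keys 1..10 in sequence.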
PercentileRanks ranks = filter.getAggregations().get("ranks"); + assertThat(ranks, notNullValue()); + assertThat(ranks.percent(99), equalTo(Double.NaN)); + + } + } + ); } /** @@ -540,8 +575,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -555,14 +590,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) - .field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) + .field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -574,14 +609,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) - .field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) + .field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -593,10 +628,10 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR).field("d")) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR).field("d")) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -607,5 +642,4 @@ public void testScriptCaching() throws Exception { equalTo(2L) ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 3ac50c7b5e104..0dbc811a7debc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -8,7 +8,6 @@ 
package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -42,6 +41,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -102,143 +102,154 @@ private void assertConsistent(double[] pcts, Percentiles percentiles, long minVa @Override public void testEmptyAggregation() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation( - percentiles("percentiles").field("value") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .percentiles(10, 15) - ) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Percentiles percentiles = bucket.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(percentiles.percentile(10), equalTo(Double.NaN)); - assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + percentiles("percentiles").field("value") + .numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .percentiles(10, 15) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + Percentiles percentiles = bucket.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(percentiles.percentile(10), equalTo(Double.NaN)); + assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + } + ); } @Override public void testUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .percentiles(0, 10, 15, 100) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - 
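// "idx_unmapped" carries no mapping for the "value" field, so this search
// matches zero documents (asserted above) and the percentiles state stays
// empty; every requested percentile, including the extremes 0 and 100, reads
// back as NaN rather than failing.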
assertThat(percentiles.percentile(0), equalTo(Double.NaN)); - assertThat(percentiles.percentile(10), equalTo(Double.NaN)); - assertThat(percentiles.percentile(15), equalTo(Double.NaN)); - assertThat(percentiles.percentile(100), equalTo(Double.NaN)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(0, 10, 15, 100) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + Percentiles percentiles = response.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(percentiles.percentile(0), equalTo(Double.NaN)); + assertThat(percentiles.percentile(10), equalTo(Double.NaN)); + assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + assertThat(percentiles.percentile(100), equalTo(Double.NaN)); + } + ); } @Override public void testSingleValuedField() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomIntBetween(1, 5); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); - } - - @Override - public void testSingleValuedFieldGetProperty() throws Exception { - final double[] pcts = randomPercentiles(); - int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) .method(PercentilesMethod.HDR) .field("value") .percentiles(pcts) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + ), + response -> { + assertHitCount(response, 10); - Percentiles percentiles = global.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + } + ); + } + @Override + public void testSingleValuedFieldGetProperty() throws Exception { + final double[] pcts = randomPercentiles(); + int sigDigits = randomSignificantDigits(); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(pcts) + ) + ), + response -> { + assertHitCount(response, 10); + + Global 
global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + Percentiles percentiles = global.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override @@ -248,78 +259,86 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts) - ) - .get(); - - 
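// The value-script expectations rely on percentiles being order statistics:
// applying "_value - dec" to every sample shifts each percentile by exactly
// dec, so the expected range simply moves from [minValue, maxValue] to
// [minValue - 1, maxValue - 1]. Illustrative sketch (plain Java, not part of
// this change):
//
//     double dec = 1;
//     double[] samples = { 3, 4, 7, 9 };
//     double[] shifted = Arrays.stream(samples).map(v -> v - dec).toArray();
//     // the p-th percentile of `shifted` equals the p-th percentile of
//     // `samples` minus dec, for every p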
assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override public void testMultiValuedField() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues, maxValues, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues, maxValues, sigDigits); + } + ); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + } + ); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, 
AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, 20 - maxValues, 20 - minValues, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, 20 - maxValues, 20 - minValues, sigDigits); + } + ); } @Override @@ -329,39 +348,43 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + } + ); } @Override public void testScriptSingleValued() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue, sigDigits); + } + ); } @Override @@ -373,19 +396,21 @@ public void testScriptSingleValuedWithParams() throws Exception { final double[] pcts = 
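// Why the assertConsistent bounds track the value script: the script rewrites every field
// value before it reaches the percentiles aggregation, so the expected min/max move with it,
// and an order-reversing script also swaps which bound is which:
//
//     "_value - 1"  -> assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits)
//     "20 - _value" -> assertConsistent(pcts, percentiles, 20 - maxValues, 20 - minValues, sigDigits)
//                      // the old maximum becomes the new minimum, and vice versa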
randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1, sigDigits); + } + ); } @Override @@ -395,19 +420,21 @@ public void testScriptMultiValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues, maxValues, sigDigits); + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues, maxValues, sigDigits); + } + ); } @Override @@ -416,89 +443,96 @@ public void testScriptMultiValuedWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1, sigDigits); + + } + ); } public void testOrderBySubAggregation() { int sigDigits = randomSignificantDigits(); boolean asc = randomBoolean(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(2L) - .subAggregation( - percentiles("percentiles").field("value") - .method(PercentilesMethod.HDR) - 
.numberOfSignificantValueDigits(sigDigits) - .percentiles(99) - ) - .order(BucketOrder.aggregation("percentiles", "99", asc)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Histogram histo = searchResponse.getAggregations().get("histo"); - double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - for (Histogram.Bucket bucket : histo.getBuckets()) { - Percentiles percentiles = bucket.getAggregations().get("percentiles"); - double p99 = percentiles.percentile(99); - if (asc) { - assertThat(p99, greaterThanOrEqualTo(previous)); - } else { - assertThat(p99, lessThanOrEqualTo(previous)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation( + percentiles("percentiles").field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .percentiles(99) + ) + .order(BucketOrder.aggregation("percentiles", "99", asc)) + ), + response -> { + assertHitCount(response, 10); + + Histogram histo = response.getAggregations().get("histo"); + double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + for (Histogram.Bucket bucket : histo.getBuckets()) { + Percentiles percentiles = bucket.getAggregations().get("percentiles"); + double p99 = percentiles.percentile(99); + if (asc) { + assertThat(p99, greaterThanOrEqualTo(previous)); + } else { + assertThat(p99, lessThanOrEqualTo(previous)); + } + previous = p99; + } } - previous = p99; - } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation( - percentiles("percentiles").method(PercentilesMethod.HDR).field("value") + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentiles("percentiles").method(PercentilesMethod.HDR).field("value") + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Percentiles percentiles = filter.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.percentile(99), equalTo(Double.NaN)); - - } + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + 
assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Percentiles percentiles = filter.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.percentile(99), equalTo(Double.NaN)); + + } + } + ); } /** @@ -512,8 +546,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -527,15 +561,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentiles("foo").method(PercentilesMethod.HDR) - .field("d") - .percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentiles("foo").method(PercentilesMethod.HDR) + .field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -547,15 +581,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentiles("foo").method(PercentilesMethod.HDR) - .field("d") - .percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentiles("foo").method(PercentilesMethod.HDR) + .field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -567,10 +601,10 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -581,5 +615,4 @@ public void testScriptCaching() throws Exception { equalTo(2L) ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index 
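// The script-caching hunks above replace "SearchResponse r = ...get(); assertNoFailures(r);"
// with an assertNoFailures overload that takes the builder directly. A minimal sketch,
// assuming it manages the response lifecycle the same way assertResponse does (the real
// helper also reports which shards failed; this sketch only checks the count):

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

final class AssertNoFailuresSketch {
    static void assertNoFailures(SearchRequestBuilder builder) {
        SearchResponse response = builder.get();
        try {
            if (response.getShardFailures().length > 0) {
                throw new AssertionError("search failed on " + response.getShardFailures().length + " shard(s)");
            }
        } finally {
            response.decRef();
        }
    }
}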
dae90424495a3..06f43416eb03a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -46,6 +45,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -89,8 +89,7 @@ public void setupSuiteScopeCluster() throws Exception { multiValueSample[i * 2] = firstMultiValueDatapoint; multiValueSample[(i * 2) + 1] = secondMultiValueDatapoint; - IndexRequestBuilder builder = client().prepareIndex("idx") - .setId(String.valueOf(i)) + IndexRequestBuilder builder = prepareIndex("idx").setId(String.valueOf(i)) .setSource( jsonBuilder().startObject() .field("value", singleValueDatapoint) @@ -114,8 +113,7 @@ public void setupSuiteScopeCluster() throws Exception { builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId(String.valueOf(i)) + prepareIndex("empty_bucket_idx").setId(String.valueOf(i)) .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } @@ -138,21 +136,24 @@ private static MedianAbsoluteDeviationAggregationBuilder randomBuilder() { @Override public void testEmptyAggregation() throws Exception { - final SearchResponse response = prepareSearch("empty_bucket_idx").addAggregation( - histogram("histogram").field("value").interval(1).minDocCount(0).subAggregation(randomBuilder().field("value")) - ).get(); - - assertHitCount(response, 2); - - final Histogram histogram = response.getAggregations().get("histogram"); - assertThat(histogram, notNullValue()); - final Histogram.Bucket bucket = histogram.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - final MedianAbsoluteDeviation mad = bucket.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), is(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").addAggregation( + histogram("histogram").field("value").interval(1).minDocCount(0).subAggregation(randomBuilder().field("value")) + ), + response -> { + assertHitCount(response, 2); + + final Histogram histogram = response.getAggregations().get("histogram"); + assertThat(histogram, notNullValue()); + final Histogram.Bucket bucket = histogram.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + final MedianAbsoluteDeviation mad = bucket.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), is(Double.NaN)); + } + ); } @Override @@ -162,68 +163,72 @@ public void testUnmapped() throws Exception { @Override public void testSingleValuedField() 
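// The indexing setup above also adopts the prepareIndex(String) shorthand from the test
// base class in place of client().prepareIndex(String). Roughly (a sketch; the real
// convenience method lives in org.elasticsearch.test.ESIntegTestCase):
//
//     protected final IndexRequestBuilder prepareIndex(String index) {
//         return client().prepareIndex(index);
//     }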
throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(randomBuilder().field("value")).get(); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(randomBuilder().field("value")), response -> { + assertHitCount(response, NUMBER_OF_DOCS); - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + }); } @Override public void testSingleValuedFieldGetProperty() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(randomBuilder().field("value"))) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final Global global = response.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), is("global")); - assertThat(global.getDocCount(), is((long) NUMBER_OF_DOCS)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().entrySet(), hasSize(1)); - - final MedianAbsoluteDeviation mad = global.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(((InternalAggregation) global).getProperty("mad"), sameInstance(mad)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(randomBuilder().field("value"))), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), is("global")); + assertThat(global.getDocCount(), is((long) NUMBER_OF_DOCS)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().entrySet(), hasSize(1)); + + final MedianAbsoluteDeviation mad = global.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(((InternalAggregation) global).getProperty("mad"), sameInstance(mad)); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { - final SearchResponse response = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomBuilder().field("value")) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()).addAggregation(randomBuilder().field("value")), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + } + ); } @Override public void testSingleValuedFieldWithValueScript() 
throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override @@ -231,53 +236,55 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override public void testMultiValuedField() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomBuilder().field("values")) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(randomBuilder().field("values")), response -> { + assertHitCount(response, NUMBER_OF_DOCS); - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + final 
MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + }); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override @@ -285,38 +292,42 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override public void testScriptSingleValued() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) - ) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - 
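// For reference, the statistic these tests validate: the median absolute deviation of a
// sample is the median of each point's absolute distance from the sample median. A
// hypothetical helper mirroring what the test class's calculateMAD is expected to compute:

import java.util.Arrays;

final class MadSketch {
    static double medianAbsoluteDeviation(long[] sample) {
        double median = median(Arrays.stream(sample).asDoubleStream().toArray());
        // deviation of each point from the sample median
        double[] deviations = Arrays.stream(sample).mapToDouble(p -> Math.abs(p - median)).toArray();
        return median(deviations);
    }

    static double median(double[] values) {
        double[] sorted = values.clone();
        Arrays.sort(sorted);
        int mid = sorted.length / 2;
        return sorted.length % 2 == 1 ? sorted[mid] : (sorted[mid - 1] + sorted[mid]) / 2.0;
    }
}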
assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) + ) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + } + ); } @Override @@ -324,38 +335,44 @@ public void testScriptSingleValuedWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params)) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params) + ) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + ); } @Override public void testScriptMultiValued() throws Exception { - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) - ) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) + ) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + } + ); } @Override @@ -363,107 +380,112 @@ public void testScriptMultiValuedWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomBuilder().script( - new Script( - 
ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, - "[ doc['value'].value, doc['value'].value + inc ]", - params + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomBuilder().script( + new Script( + ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, + "[ doc['value'].value, doc['value'].value + inc ]", + params + ) ) - ) - ) - .get(); + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); - assertHitCount(response, NUMBER_OF_DOCS); + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); - final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getName(), is("mad")); - - final double fromIncrementedSampleMAD = calculateMAD( - Arrays.stream(singleValueSample).flatMap(point -> LongStream.of(point, point + 1)).toArray() + final double fromIncrementedSampleMAD = calculateMAD( + Arrays.stream(singleValueSample).flatMap(point -> LongStream.of(point, point + 1)).toArray() + ); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } ); - assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); } public void testAsSubAggregation() throws Exception { final int rangeBoundary = (MAX_SAMPLE_VALUE + MIN_SAMPLE_VALUE) / 2; - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - range("range").field("value") - .addRange(MIN_SAMPLE_VALUE, rangeBoundary) - .addRange(rangeBoundary, MAX_SAMPLE_VALUE) - .subAggregation(randomBuilder().field("value")) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final long[] lowerBucketSample = Arrays.stream(singleValueSample) - .filter(point -> point >= MIN_SAMPLE_VALUE && point < rangeBoundary) - .toArray(); - final long[] upperBucketSample = Arrays.stream(singleValueSample) - .filter(point -> point >= rangeBoundary && point < MAX_SAMPLE_VALUE) - .toArray(); - - final Range range = response.getAggregations().get("range"); - assertThat(range, notNullValue()); - List buckets = range.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets, hasSize(2)); - - final Range.Bucket lowerBucket = buckets.get(0); - assertThat(lowerBucket, notNullValue()); - - final MedianAbsoluteDeviation lowerBucketMAD = lowerBucket.getAggregations().get("mad"); - assertThat(lowerBucketMAD, notNullValue()); - assertThat(lowerBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(lowerBucketSample))); - - final Range.Bucket upperBucket = buckets.get(1); - assertThat(upperBucket, notNullValue()); - - final MedianAbsoluteDeviation upperBucketMAD = upperBucket.getAggregations().get("mad"); - assertThat(upperBucketMAD, notNullValue()); - assertThat(upperBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(upperBucketSample))); - + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + range("range").field("value") + .addRange(MIN_SAMPLE_VALUE, rangeBoundary) + .addRange(rangeBoundary, MAX_SAMPLE_VALUE) + .subAggregation(randomBuilder().field("value")) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final long[] lowerBucketSample = Arrays.stream(singleValueSample) + .filter(point -> point >= MIN_SAMPLE_VALUE && point < rangeBoundary) + .toArray(); + final long[] upperBucketSample = Arrays.stream(singleValueSample) + .filter(point -> point >= 
rangeBoundary && point < MAX_SAMPLE_VALUE) + .toArray(); + + final Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + List buckets = range.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets, hasSize(2)); + + final Range.Bucket lowerBucket = buckets.get(0); + assertThat(lowerBucket, notNullValue()); + + final MedianAbsoluteDeviation lowerBucketMAD = lowerBucket.getAggregations().get("mad"); + assertThat(lowerBucketMAD, notNullValue()); + assertThat(lowerBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(lowerBucketSample))); + + final Range.Bucket upperBucket = buckets.get(1); + assertThat(upperBucket, notNullValue()); + + final MedianAbsoluteDeviation upperBucketMAD = upperBucket.getAggregations().get("mad"); + assertThat(upperBucketMAD, notNullValue()); + assertThat(upperBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(upperBucketSample))); + } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { final int numberOfBuckets = 10; - final SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .size(numberOfBuckets) - .order(BucketOrder.compound(BucketOrder.aggregation("filter>mad", true))) - .subAggregation( - filter("filter", termQuery("value", MAX_SAMPLE_VALUE + 1)).subAggregation(randomBuilder().field("value")) - ) - ) - .get(); - - assertHitCount(response, NUMBER_OF_DOCS); - - final Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets, hasSize(numberOfBuckets)); - - for (int i = 0; i < numberOfBuckets; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - - MedianAbsoluteDeviation mad = filter.getAggregations().get("mad"); - assertThat(mad, notNullValue()); - assertThat(mad.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .size(numberOfBuckets) + .order(BucketOrder.compound(BucketOrder.aggregation("filter>mad", true))) + .subAggregation( + filter("filter", termQuery("value", MAX_SAMPLE_VALUE + 1)).subAggregation(randomBuilder().field("value")) + ) + ), + response -> { + assertHitCount(response, NUMBER_OF_DOCS); + + final Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets, hasSize(numberOfBuckets)); + + for (int i = 0; i < numberOfBuckets; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + + MedianAbsoluteDeviation mad = filter.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + } + } + ); } /** @@ -478,8 +500,8 @@ public void testScriptCaching() throws Exception { indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + 
prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -493,13 +515,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - randomBuilder().field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + randomBuilder().field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -511,13 +533,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - randomBuilder().field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + randomBuilder().field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -529,8 +551,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(randomBuilder().field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(randomBuilder().field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 2ea09960071f9..0ab26e1d9a049 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -50,6 +49,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.scriptedMetric; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static 
org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -274,8 +274,7 @@ public void setupSuiteScopeCluster() throws Exception { numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { builders.add( - client().prepareIndex("idx") - .setId("" + i) + prepareIndex("idx").setId("" + i) .setSource( jsonBuilder().startObject().field("value", randomAlphaOfLengthBetween(5, 15)).field("l_value", i).endObject() ) @@ -295,9 +294,7 @@ public void setupSuiteScopeCluster() throws Exception { builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) ); } @@ -359,37 +356,39 @@ public void testMap() { Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - int numShardsRun = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Map.class)); - Map map = (Map) object; - assertThat(map.size(), lessThanOrEqualTo(1)); - if (map.size() == 1) { - assertThat(map.get("count"), notNullValue()); - assertThat(map.get("count"), instanceOf(Number.class)); - assertThat(map.get("count"), equalTo(1)); - numShardsRun++; + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(scriptedMetric("scripted").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); + int numShardsRun = 0; + for (Object object : aggregationList) { + assertThat(object, 
notNullValue()); + assertThat(object, instanceOf(Map.class)); + Map map = (Map) object; + assertThat(map.size(), lessThanOrEqualTo(1)); + if (map.size() == 1) { + assertThat(map.get("count"), notNullValue()); + assertThat(map.get("count"), instanceOf(Number.class)); + assertThat(map.get("count"), equalTo(1)); + numShardsRun++; + } + } + // We don't know how many shards will have documents but we need to make + // sure that at least one shard ran the map script + assertThat(numShardsRun, greaterThan(0)); } - } - // We don't know how many shards will have documents but we need to make - // sure that at least one shard ran the map script - assertThat(numShardsRun, greaterThan(0)); + ); } public void testMapWithParams() { @@ -401,45 +400,47 @@ public void testMapWithParams() { Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(aggregationParams) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - int numShardsRun = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Map.class)); - Map map = (Map) object; - for (Map.Entry entry : map.entrySet()) { - assertThat(entry, notNullValue()); - assertThat(entry.getKey(), notNullValue()); - assertThat(entry.getKey(), instanceOf(String.class)); - assertThat(entry.getValue(), notNullValue()); - assertThat(entry.getValue(), instanceOf(Number.class)); - String stringValue = (String) entry.getKey(); - assertThat(stringValue, equalTo("12")); - Number numberValue = (Number) entry.getValue(); - assertThat(numberValue, equalTo(1)); - numShardsRun++; + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(aggregationParams) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) 
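// assertNoFailuresAndResponse, used for the ScriptedMetricIT conversions, composes the two
// helpers: fail fast on shard failures, then run the callback, then release the response.
// A minimal sketch under the same assumptions as the sketches above:

import java.util.function.Consumer;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

final class AssertNoFailuresAndResponseSketch {
    static void assertNoFailuresAndResponse(SearchRequestBuilder builder, Consumer<SearchResponse> consumer) {
        SearchResponse response = builder.get();
        try {
            if (response.getShardFailures().length > 0) {
                throw new AssertionError("search failed on " + response.getShardFailures().length + " shard(s)");
            }
            consumer.accept(response);
        } finally {
            response.decRef();
        }
    }
}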
scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); + int numShardsRun = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Map.class)); + Map map = (Map) object; + for (Map.Entry entry : map.entrySet()) { + assertThat(entry, notNullValue()); + assertThat(entry.getKey(), notNullValue()); + assertThat(entry.getKey(), instanceOf(String.class)); + assertThat(entry.getValue(), notNullValue()); + assertThat(entry.getValue(), instanceOf(Number.class)); + String stringValue = (String) entry.getKey(); + assertThat(stringValue, equalTo("12")); + Number numberValue = (Number) entry.getValue(); + assertThat(numberValue, equalTo(1)); + numShardsRun++; + } + } + assertThat(numShardsRun, greaterThan(0)); } - } - assertThat(numShardsRun, greaterThan(0)); + ); } public void testInitMutatesParams() { @@ -449,47 +450,56 @@ public void testInitMutatesParams() { Map params = new HashMap<>(); params.put("vars", varsMap); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) - .mapScript( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", Collections.emptyMap()) - ) - .combineScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap())) - .reduceScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap())) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(HashMap.class)); - @SuppressWarnings("unchecked") - Map map = (Map) object; - assertThat(map, hasKey("list")); - assertThat(map.get("list"), instanceOf(List.class)); - List list = (List) map.get("list"); - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - assertThat(numberValue, equalTo(3)); - totalCount += numberValue.longValue(); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) + .mapScript( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ) + ) + .combineScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap())) + .reduceScript( + new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); + long totalCount = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(HashMap.class)); + @SuppressWarnings("unchecked") + Map map = (Map) object; + assertThat(map, hasKey("list")); + assertThat(map.get("list"), instanceOf(List.class)); + List list = (List) map.get("list"); + for (Object o : list) { + assertThat(o, notNullValue()); + assertThat(o, instanceOf(Number.class)); + Number numberValue = (Number) o; + assertThat(numberValue, equalTo(3)); + totalCount += numberValue.longValue(); + } + } + assertThat(totalCount, equalTo(numDocs * 3)); } - } - assertThat(totalCount, equalTo(numDocs * 3)); + ); } public void testMapCombineWithParams() { @@ -508,40 +518,42 @@ public void testMapCombineWithParams() { ); Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - // A particular shard may not have any documents stored on it so - // we have to assume the lower bound may be 0. 
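// Note the deliberate semantic change threaded through these ScriptedMetricIT hunks: the
// per-shard state count is now asserted with greaterThanOrEqualTo(getNumShards("idx").numPrimaries)
// instead of equalTo(...). Exact equality assumes exactly one map state per primary shard,
// which no longer holds if a shard's search can produce more than one state (for example
// when it executes as several slices). The per-element checks plus the numShardsRun and
// totalCount assertions still pin down the aggregate result.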
The check at the - // bottom of the test method will make sure the count is correct - assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(numDocs))); - totalCount += numberValue.longValue(); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); + long totalCount = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(List.class)); + List list = (List) object; + for (Object o : list) { + assertThat(o, notNullValue()); + assertThat(o, instanceOf(Number.class)); + Number numberValue = (Number) o; + // A particular shard may not have any documents stored on it so + // we have to assume the lower bound may be 0. The check at the + // bottom of the test method will make sure the count is correct + assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(numDocs))); + totalCount += numberValue.longValue(); + } + } + assertThat(totalCount, equalTo(numDocs)); } - } - assertThat(totalCount, equalTo(numDocs)); + ); } public void testInitMapCombineWithParams() { @@ -566,44 +578,46 @@ public void testInitMapCombineWithParams() { ); Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - // A particular shard may not have any 
documents stored on it so - // we have to assume the lower bound may be 0. The check at the - // bottom of the test method will make sure the count is correct - assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(numDocs * 3))); - totalCount += numberValue.longValue(); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), greaterThanOrEqualTo(getNumShards("idx").numPrimaries)); + long totalCount = 0; + for (Object object : aggregationList) { + assertThat(object, notNullValue()); + assertThat(object, instanceOf(List.class)); + List list = (List) object; + for (Object o : list) { + assertThat(o, notNullValue()); + assertThat(o, instanceOf(Number.class)); + Number numberValue = (Number) o; + // A particular shard may not have any documents stored on it so + // we have to assume the lower bound may be 0. The check at the + // bottom of the test method will make sure the count is correct + assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(numDocs * 3))); + totalCount += numberValue.longValue(); + } + } + assertThat(totalCount, equalTo(numDocs * 3)); } - } - assertThat(totalCount, equalTo(numDocs * 3)); + ); } public void testInitMapCombineReduceWithParams() { @@ -633,31 +647,33 @@ public void testInitMapCombineReduceWithParams() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + 
.initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + } + ); } @SuppressWarnings("rawtypes") @@ -688,42 +704,43 @@ public void testInitMapCombineReduceGetProperty() throws Exception { Collections.emptyMap() ); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - ) - .get(); - - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocs)); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(numDocs)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - ScriptedMetric scriptedMetricAggregation = global.getAggregations().get("scripted"); - assertThat(scriptedMetricAggregation, notNullValue()); - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); - assertThat(((InternalAggregation) global).getProperty("scripted"), sameInstance(scriptedMetricAggregation)); - assertThat((List) ((InternalAggregation) global).getProperty("scripted.value"), sameInstance(aggregationList)); - assertThat((List) ((InternalAggregation) scriptedMetricAggregation).getProperty("value"), sameInstance(aggregationList)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(numDocs)); + 
assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + ScriptedMetric scriptedMetricAggregation = global.getAggregations().get("scripted"); + assertThat(scriptedMetricAggregation, notNullValue()); + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + assertThat(((InternalAggregation) global).getProperty("scripted"), sameInstance(scriptedMetricAggregation)); + assertThat((List) ((InternalAggregation) global).getProperty("scripted.value"), sameInstance(aggregationList)); + assertThat((List) ((InternalAggregation) scriptedMetricAggregation).getProperty("value"), sameInstance(aggregationList)); + } + ); } public void testMapCombineReduceWithParams() { @@ -752,27 +769,29 @@ public void testMapCombineReduceWithParams() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs)); + } + 
); } public void testInitMapReduceWithParams() { @@ -797,31 +816,33 @@ public void testInitMapReduceWithParams() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + } + ); } public void testMapReduceWithParams() { @@ -844,27 +865,29 @@ public void testMapReduceWithParams() { Collections.emptyMap() ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = 
aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs)); + } + ); } public void testInitMapCombineReduceWithParamsAndReduceParams() { @@ -897,31 +920,33 @@ public void testInitMapCombineReduceWithParamsAndReduceParams() { reduceParams ); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 12)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + 
assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 12)); + } + ); } public void testInitMapCombineReduceWithParamsStored() { @@ -931,31 +956,33 @@ public void testInitMapCombineReduceWithParamsStored() { Map params = new HashMap<>(); params.put("vars", varsMap); - SearchResponse response = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params) - .initScript(new Script(ScriptType.STORED, null, "initScript_stored", Collections.emptyMap())) - .mapScript(new Script(ScriptType.STORED, null, "mapScript_stored", Collections.emptyMap())) - .combineScript(new Script(ScriptType.STORED, null, "combineScript_stored", Collections.emptyMap())) - .reduceScript(new Script(ScriptType.STORED, null, "reduceScript_stored", Collections.emptyMap())) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(new Script(ScriptType.STORED, null, "initScript_stored", Collections.emptyMap())) + .mapScript(new Script(ScriptType.STORED, null, "mapScript_stored", Collections.emptyMap())) + .combineScript(new Script(ScriptType.STORED, null, "combineScript_stored", Collections.emptyMap())) + .reduceScript(new Script(ScriptType.STORED, null, "reduceScript_stored", Collections.emptyMap())) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + + Aggregation aggregation = response.getAggregations().get("scripted"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); + } + ); } public void testInitMapCombineReduceWithParamsAsSubAgg() { @@ -985,49 +1012,51 @@ public void testInitMapCombineReduceWithParamsAsSubAgg() { Collections.emptyMap() ); - SearchResponse response = 
prepareSearch("idx").setQuery(matchAllQuery()) - .setSize(1000) - .addAggregation( - histogram("histo").field("l_value") - .interval(1) - .subAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - ) - .get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); - Aggregation aggregation = response.getAggregations().get("histo"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(Histogram.class)); - Histogram histoAgg = (Histogram) aggregation; - assertThat(histoAgg.getName(), equalTo("histo")); - List buckets = histoAgg.getBuckets(); - assertThat(buckets, notNullValue()); - for (Bucket b : buckets) { - assertThat(b, notNullValue()); - assertThat(b.getDocCount(), equalTo(1L)); - Aggregations subAggs = b.getAggregations(); - assertThat(subAggs, notNullValue()); - assertThat(subAggs.asList().size(), equalTo(1)); - Aggregation subAgg = subAggs.get("scripted"); - assertThat(subAgg, notNullValue()); - assertThat(subAgg, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) subAgg; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(1)); - Object object = aggregationList.get(0); - assertThat(object, notNullValue()); - assertThat(object, instanceOf(Number.class)); - assertThat(((Number) object).longValue(), equalTo(3L)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .setSize(1000) + .addAggregation( + histogram("histo").field("l_value") + .interval(1) + .subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); + Aggregation aggregation = response.getAggregations().get("histo"); + assertThat(aggregation, notNullValue()); + assertThat(aggregation, instanceOf(Histogram.class)); + Histogram histoAgg = (Histogram) aggregation; + assertThat(histoAgg.getName(), equalTo("histo")); + List buckets = histoAgg.getBuckets(); + assertThat(buckets, notNullValue()); + for (Bucket b : buckets) { + assertThat(b, notNullValue()); + assertThat(b.getDocCount(), equalTo(1L)); + Aggregations subAggs = b.getAggregations(); + assertThat(subAggs, notNullValue()); + assertThat(subAggs.asList().size(), equalTo(1)); + Aggregation subAgg = subAggs.get("scripted"); + assertThat(subAgg, notNullValue()); + assertThat(subAgg, instanceOf(ScriptedMetric.class)); + ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) subAgg; + assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); + assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); + assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); + List aggregationList = (List) scriptedMetricAggregation.aggregation(); + assertThat(aggregationList.size(), equalTo(1)); + Object object = aggregationList.get(0); + assertThat(object, notNullValue()); + assertThat(object, instanceOf(Number.class)); + assertThat(((Number) object).longValue(), equalTo(3L)); + 
} + } + ); } public void testEmptyAggregation() throws Exception { @@ -1057,36 +1086,38 @@ Collections.emptyMap() ); - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation( - scriptedMetric("scripted").params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript) - ) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted"); - assertThat(scriptedMetric, notNullValue()); - assertThat(scriptedMetric.getName(), equalTo("scripted")); - assertThat(scriptedMetric.aggregation(), notNullValue()); - assertThat(scriptedMetric.aggregation(), instanceOf(List.class)); - @SuppressWarnings("unchecked") // We'll just get a ClassCastException a couple lines down if we're wrong, its ok. - List aggregationResult = (List) scriptedMetric.aggregation(); - assertThat(aggregationResult.size(), equalTo(1)); - assertThat(aggregationResult.get(0), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted"); + assertThat(scriptedMetric, notNullValue()); + assertThat(scriptedMetric.getName(), equalTo("scripted")); + assertThat(scriptedMetric.aggregation(), notNullValue()); + assertThat(scriptedMetric.aggregation(), instanceOf(List.class)); + @SuppressWarnings("unchecked") // We'll just get a ClassCastException a couple lines down if we're wrong, it's ok.
+ List aggregationResult = (List) scriptedMetric.aggregation(); + assertThat(aggregationResult.size(), equalTo(1)); + assertThat(aggregationResult.get(0), equalTo(0)); + } + ); } /** @@ -1114,8 +1145,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -1129,12 +1160,15 @@ public void testScriptCaching() throws Exception { ); // Test that a non-deterministic init script causes the result to not be cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - scriptedMetric("foo").initScript(ndInitScript).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + scriptedMetric("foo").initScript(ndInitScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1146,10 +1180,10 @@ public void testScriptCaching() throws Exception { ); // Test that a non-deterministic map script causes the result to not be cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(ndMapScript).combineScript(combineScript).reduceScript(reduceScript)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(ndMapScript).combineScript(combineScript).reduceScript(reduceScript)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1161,10 +1195,10 @@ public void testScriptCaching() throws Exception { ); // Test that a non-deterministic combine script causes the result to not be cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(ndRandom).reduceScript(reduceScript)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(ndRandom).reduceScript(reduceScript)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1176,10 +1210,10 @@ public void testScriptCaching() throws Exception { ); // NOTE: random reduce scripts don't hit the query shard context (they are done on the coordinator) and so can be cached. 
- r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(ndRandom)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(ndRandom)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1191,10 +1225,10 @@ public void testScriptCaching() throws Exception { ); // Test that all deterministic scripts cause the request to be cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index eb4d5aa74f2a0..f97d886ae8df6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -35,6 +35,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -48,165 +49,175 @@ protected Collection> nodePlugins() { @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(stats("stats").field("value"))) - .get(); - - assertShardExecutionState(searchResponse, 0); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Stats stats = bucket.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(Double.isNaN(stats.getAvg()), is(true)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(stats("stats").field("value")) + ), + response -> { + assertShardExecutionState(response, 0); + assertThat(response.getHits().getTotalHits().value, 
equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(Double.isNaN(stats.getAvg()), is(true)); + } + ); } @Override public void testSingleValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").field("value")).get(); - - assertShardExecutionState(searchResponse, 0); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").field("value")), response -> { + assertShardExecutionState(response, 0); - assertHitCount(searchResponse, 10); + assertHitCount(response, 10); - Stats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getCount(), equalTo(10L)); + Stats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getMin(), equalTo(1.0)); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getCount(), equalTo(10L)); + }); } public void testSingleValuedField_WithFormatter() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(stats("stats").format("0000.0").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - Stats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); - assertThat(stats.getAvgAsString(), equalTo("0005.5")); - assertThat(stats.getMin(), equalTo(1.0)); - assertThat(stats.getMinAsString(), equalTo("0001.0")); - assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getMaxAsString(), equalTo("0010.0")); - assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(stats.getSumAsString(), equalTo("0055.0")); - assertThat(stats.getCount(), equalTo(10L)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").format("0000.0").field("value")), + response -> { + assertHitCount(response, 10); + + Stats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); + assertThat(stats.getAvgAsString(), equalTo("0005.5")); + assertThat(stats.getMin(), equalTo(1.0)); + 
assertThat(stats.getMinAsString(), equalTo("0001.0")); + assertThat(stats.getMax(), equalTo(10.0)); + assertThat(stats.getMaxAsString(), equalTo("0010.0")); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(stats.getSumAsString(), equalTo("0055.0")); + assertThat(stats.getCount(), equalTo(10L)); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(stats("stats").field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Stats stats = global.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - Stats statsFromProperty = (Stats) ((InternalAggregation) global).getProperty("stats"); - assertThat(statsFromProperty, notNullValue()); - assertThat(statsFromProperty, sameInstance(stats)); - double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; - assertThat(stats.getAvg(), equalTo(expectedAvgValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); - double expectedMinValue = 1.0; - assertThat(stats.getMin(), equalTo(expectedMinValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); - double expectedMaxValue = 10.0; - assertThat(stats.getMax(), equalTo(expectedMaxValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); - double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; - assertThat(stats.getSum(), equalTo(expectedSumValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); - long expectedCountValue = 10; - assertThat(stats.getCount(), equalTo(expectedCountValue)); - assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(stats("stats").field("value"))), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + Stats stats = global.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + Stats statsFromProperty = (Stats) ((InternalAggregation) global).getProperty("stats"); + assertThat(statsFromProperty, notNullValue()); + assertThat(statsFromProperty, sameInstance(stats)); + double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; + assertThat(stats.getAvg(), equalTo(expectedAvgValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); + double expectedMinValue = 1.0; + 
assertThat(stats.getMin(), equalTo(expectedMinValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); + double expectedMaxValue = 10.0; + assertThat(stats.getMax(), equalTo(expectedMaxValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); + double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; + assertThat(stats.getSum(), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); + long expectedCountValue = 10; + assertThat(stats.getCount(), equalTo(expectedCountValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); + } + ); } @Override public void testMultiValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").field("values")).get(); - - assertShardExecutionState(searchResponse, 0); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(stats("stats").field("values")), response -> { + assertShardExecutionState(response, 0); - assertHitCount(searchResponse, 10); + assertHitCount(response, 10); - Stats stats = searchResponse.getAggregations().get("stats"); - assertThat(stats, notNullValue()); - assertThat(stats.getName(), equalTo("stats")); - assertThat( - stats.getAvg(), - equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) - ); - assertThat(stats.getMin(), equalTo(2.0)); - assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12)); - assertThat(stats.getCount(), equalTo(20L)); + Stats stats = response.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + ); + assertThat(stats.getMin(), equalTo(2.0)); + assertThat(stats.getMax(), equalTo(12.0)); + assertThat( + stats.getSum(), + equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) + ); + assertThat(stats.getCount(), equalTo(20L)); + }); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>stats.avg", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Stats stats = filter.getAggregations().get("stats"); - assertThat(stats, 
notNullValue()); - assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(stats.getAvg(), equalTo(Double.NaN)); - assertThat(stats.getSum(), equalTo(0.0)); - assertThat(stats.getCount(), equalTo(0L)); - - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>stats.avg", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value"))) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Stats stats = filter.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(stats.getAvg(), equalTo(Double.NaN)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + + } + } + ); } - private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception { + private void assertShardExecutionState(SearchResponse response, int expectedFailures) { ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { @@ -228,8 +239,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -243,13 +254,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - stats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + stats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -261,13 +272,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - stats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + 
assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + stats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -279,8 +290,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(stats("foo").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(stats("foo").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index a837b22694ef5..37524dabe7f09 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -39,6 +38,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -66,9 +67,9 @@ public void setupSuiteScopeCluster() throws Exception { prepareCreate("new_index").setMapping("transit_mode", "type=keyword", "route_length_miles", "type=double").get(); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("old_index").setSource("transit_mode", "train", "distance", 42.0)); - builders.add(client().prepareIndex("old_index").setSource("transit_mode", "bus", "distance", 50.5)); - builders.add(client().prepareIndex("new_index").setSource("transit_mode", "train", "route_length_miles", 100.2)); + builders.add(prepareIndex("old_index").setSource("transit_mode", "train", "distance", 42.0)); + builders.add(prepareIndex("old_index").setSource("transit_mode", "bus", "distance", 50.5)); + builders.add(prepareIndex("new_index").setSource("transit_mode", "train", "route_length_miles", 100.2)); indexRandom(true, builders); ensureSearchable(); @@ -77,20 +78,22 @@ public void setupSuiteScopeCluster() throws Exception { @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(sum("sum").field("value"))) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = 
searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo(0.0)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(sum("sum").field("value"))), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo(0.0)); + } + ); } /** This test has been moved to {@link SumAggregatorTests#testUnmapped()} */ @@ -99,100 +102,104 @@ public void testUnmapped() throws Exception {} @Override public void testSingleValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").field("value")).get(); - - assertHitCount(searchResponse, 10); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").field("value")), response -> { + assertHitCount(response, 10); - Sum sum = searchResponse.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + Sum sum = response.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + }); } public void testSingleValuedFieldWithFormatter() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(sum("sum").format("0000.0").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - Sum sum = searchResponse.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); - assertThat(sum.getValueAsString(), equalTo("0055.0")); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").format("0000.0").field("value")), + response -> { + assertHitCount(response, 10); + + Sum sum = response.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); + assertThat(sum.getValueAsString(), equalTo("0055.0")); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(sum("sum").field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - 
assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Sum sum = global.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - double expectedSumValue = (double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; - assertThat(sum.value(), equalTo(expectedSumValue)); - assertThat((Sum) ((InternalAggregation) global).getProperty("sum"), equalTo(sum)); - assertThat((double) ((InternalAggregation) global).getProperty("sum.value"), equalTo(expectedSumValue)); - assertThat((double) ((InternalAggregation) sum).getProperty("value"), equalTo(expectedSumValue)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(sum("sum").field("value"))), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + Sum sum = global.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + double expectedSumValue = (double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; + assertThat(sum.value(), equalTo(expectedSumValue)); + assertThat((Sum) ((InternalAggregation) global).getProperty("sum"), equalTo(sum)); + assertThat((double) ((InternalAggregation) global).getProperty("sum.value"), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) sum).getProperty("value"), equalTo(expectedSumValue)); + } + ); } @Override public void testMultiValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").field("values")).get(); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(sum("sum").field("values")), response -> { + assertHitCount(response, 10); - assertHitCount(searchResponse, 10); - - Sum sum = searchResponse.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 11 + 11 + 12)); + Sum sum = response.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 11 + 11 + 12)); + }); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>sum", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = 
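// Illustrative note, not part of this change set: the getProperty assertions above exercise
// the aggregation property-path syntax on InternalAggregation. "sum" resolves the
// sub-aggregation named sum, "sum.value" descends into that aggregation's metric, and "value"
// invoked on the metric itself yields the same double, e.g.:
//
//     double v = (double) ((InternalAggregation) global).getProperty("sum.value");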
bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Sum sum = filter.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(0.0)); - - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>sum", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value"))) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Sum sum = filter.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(0.0)); + + } + } + ); } /** @@ -206,8 +213,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -221,12 +228,12 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -238,12 +245,12 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -255,8 +262,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(sum("foo").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(sum("foo").field("d"))); 
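// Illustrative note, not part of this change set: every testScriptCaching variant in these
// files probes the shard request cache with the same recurring assertion, roughly:
//
//     var requestCache = indicesAdmin().prepareStats("cache_test_idx")
//         .setRequestCache(true).get().getTotal().getRequestCache();
//     assertThat(requestCache.getHitCount(), equalTo(expectedHits));    // expectedHits and
//     assertThat(requestCache.getMissCount(), equalTo(expectedMisses)); // expectedMisses are
//                                                                       // placeholders per step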
assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -269,46 +275,48 @@ public void testScriptCaching() throws Exception { } public void testFieldAlias() { - SearchResponse response = prepareSearch("old_index", "new_index").addAggregation(sum("sum").field("route_length_miles")).get(); - - assertNoFailures(response); - - Sum sum = response.getAggregations().get("sum"); - assertThat(sum, IsNull.notNullValue()); - assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.value(), equalTo(192.7)); + assertNoFailuresAndResponse( + prepareSearch("old_index", "new_index").addAggregation(sum("sum").field("route_length_miles")), + response -> { + Sum sum = response.getAggregations().get("sum"); + assertThat(sum, IsNull.notNullValue()); + assertThat(sum.getName(), equalTo("sum")); + assertThat(sum.value(), equalTo(192.7)); + } + ); } public void testFieldAliasInSubAggregation() { - SearchResponse response = prepareSearch("old_index", "new_index").addAggregation( - terms("terms").field("transit_mode").subAggregation(sum("sum").field("route_length_miles")) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(2)); - - Terms.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("train")); - assertThat(bucket.getDocCount(), equalTo(2L)); - - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(142.2)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKey(), equalTo("bus")); - assertThat(bucket.getDocCount(), equalTo(1L)); - - sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - assertThat(sum.value(), equalTo(50.5)); + assertNoFailuresAndResponse( + prepareSearch("old_index", "new_index").addAggregation( + terms("terms").field("transit_mode").subAggregation(sum("sum").field("route_length_miles")) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + Terms.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("train")); + assertThat(bucket.getDocCount(), equalTo(2L)); + + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(142.2)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo("bus")); + assertThat(bucket.getDocCount(), equalTo(1L)); + + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(50.5)); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 421d6f118c277..f1a4c9e5bd7a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -40,6 +39,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -96,26 +96,28 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minVal @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value"))) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - PercentileRanks reversePercentiles = bucket.getAggregations().get("percentile_ranks"); - assertThat(reversePercentiles, notNullValue()); - assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); - assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value"))) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + PercentileRanks reversePercentiles = bucket.getAggregations().get("percentile_ranks"); + assertThat(reversePercentiles, notNullValue()); + assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); + assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + } + ); } public void testNullValuesField() throws Exception { @@ -142,95 +144,109 @@ public void testEmptyValuesField() throws Exception { @Override public void testUnmapped() throws Exception { - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 })).field("value")) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - PercentileRanks reversePercentiles = searchResponse.getAggregations().get("percentile_ranks"); - 
assertThat(reversePercentiles, notNullValue()); - assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); - assertThat(reversePercentiles.percent(0), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); - assertThat(reversePercentiles.percent(100), equalTo(Double.NaN)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 })).field("value")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + PercentileRanks reversePercentiles = response.getAggregations().get("percentile_ranks"); + assertThat(reversePercentiles, notNullValue()); + assertThat(reversePercentiles.getName(), equalTo("percentile_ranks")); + assertThat(reversePercentiles.percent(0), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(10), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(15), equalTo(Double.NaN)); + assertThat(reversePercentiles.percent(100), equalTo(Double.NaN)); + } + ); } @Override public void testSingleValuedField() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - PercentileRanks values = global.getAggregations().get("percentile_ranks"); - assertThat(values, notNullValue()); - assertThat(values.getName(), equalTo("percentile_ranks")); - assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) + ), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + 
assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + PercentileRanks values = global.getAggregations().get("percentile_ranks"); + assertThat(values, notNullValue()); + assertThat(values.getName(), equalTo("percentile_ranks")); + assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); + } + ); } public void testSingleValuedFieldOutsideRange() throws Exception { final double[] pcts = new double[] { minValue - 1, maxValue + 1 }; - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1); + } + ); } @Override @@ -238,61 +254,71 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - 
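// Illustrative note, not part of this change set: in these value-script tests "_value" stands
// for each field value seen by the aggregator, so "_value - dec" with params {dec: 1} shifts
// every value down by one; that is why the expected ranks are drawn from
// randomPercents(minValue - 1, maxValue - 1) rather than from the raw field range.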
randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ), + response -> { + + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1); + } + ); } @Override public void testMultiValuedField() throws Exception { final double[] pcts = randomPercents(minValues, maxValues); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("values")) - .get(); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("values")), + response -> { - assertHitCount(searchResponse, 10); + assertHitCount(response, 10); - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues); + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues); + } + ); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1); + } + ); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercents(-maxValues, -minValues); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, -maxValues); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + 
.script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, -maxValues); + } + ); } @Override @@ -300,34 +326,38 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1); + } + ); } @Override public void testScriptSingleValued() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()) + ) + ), + response -> { + assertHitCount(response, 10); - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue); + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue); + } + ); } @Override @@ -338,28 +368,32 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValue - 1); + } + ); } @Override public void 
testScriptMultiValued() throws Exception { final double[] pcts = randomPercents(minValues, maxValues); Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues); + } + ); } @Override @@ -367,78 +401,84 @@ public void testScriptMultiValuedWithParams() throws Exception { Script script = AggregationTestScriptsPlugin.DECREMENT_ALL_VALUES; final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) - .get(); - - assertHitCount(searchResponse, 10); - - final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks"); - assertConsistent(pcts, values, minValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)), + response -> { + assertHitCount(response, 10); + + final PercentileRanks values = response.getAggregations().get("percentile_ranks"); + assertConsistent(pcts, values, minValues - 1); + } + ); } public void testOrderBySubAggregation() { boolean asc = randomBoolean(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(2L) - .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 99 }).field("value"))) - .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Histogram histo = searchResponse.getAggregations().get("histo"); - double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - for (Histogram.Bucket bucket : histo.getBuckets()) { - PercentileRanks values = bucket.getAggregations().get("percentile_ranks"); - double p99 = values.percent(99); - if (asc) { - assertThat(p99, greaterThanOrEqualTo(previous)); - } else { - assertThat(p99, lessThanOrEqualTo(previous)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 99 }).field("value"))) + .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) + ), + response -> { + assertHitCount(response, 10); + + Histogram histo = response.getAggregations().get("histo"); + double previous = asc ? 
Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + for (Histogram.Bucket bucket : histo.getBuckets()) { + PercentileRanks values = bucket.getAggregations().get("percentile_ranks"); + double p99 = values.percent(99); + if (asc) { + assertThat(p99, greaterThanOrEqualTo(previous)); + } else { + assertThat(p99, lessThanOrEqualTo(previous)); + } + previous = p99; + } } - previous = p99; - } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation( - percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.TDIGEST).field("value") + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.TDIGEST).field("value") + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - PercentileRanks ranks = filter.getAggregations().get("ranks"); - assertThat(ranks, notNullValue()); - assertThat(ranks.percent(99), equalTo(Double.NaN)); - - } + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + PercentileRanks ranks = filter.getAggregations().get("ranks"); + assertThat(ranks, notNullValue()); + assertThat(ranks.percent(99), equalTo(Double.NaN)); + + } + } + ); } /** @@ -452,8 +492,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -467,13 +507,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentileRanks("foo", new double[] { 
50.0 }).field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -485,13 +525,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentileRanks("foo", new double[] { 50.0 }).field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -503,8 +543,9 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(percentileRanks("foo", new double[] { 50.0 }).field("d")).get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0).addAggregation(percentileRanks("foo", new double[] { 50.0 }).field("d")) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -515,5 +556,4 @@ public void testScriptCaching() throws Exception { equalTo(2L) ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 58b2b13853848..98086451c3456 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; @@ -42,6 +41,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -102,108 +102,122 @@ private void assertConsistent(double[] pcts, Percentiles percentiles, long minVa @Override public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = 
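// Illustrative note, not part of this change set: the caching tests above hinge on script
// determinism. A request whose aggregation script is nondeterministic (e.g. "Math.random()")
// must never enter the request cache, while a deterministic value script (e.g. "_value - 1")
// leaves the request cacheable, just like the plain non-scripted request that follows it.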
prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(1L) - .minDocCount(0) - .subAggregation(randomCompression(percentiles("percentiles").field("value")).percentiles(10, 15)) - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Percentiles percentiles = bucket.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(percentiles.percentile(10), equalTo(Double.NaN)); - assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + assertResponse( + prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(randomCompression(percentiles("percentiles").field("value")).percentiles(10, 15)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + Histogram.Bucket bucket = histo.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + Percentiles percentiles = bucket.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(percentiles.percentile(10), equalTo(Double.NaN)); + assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + } + ); } @Override public void testUnmapped() throws Exception { - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(0, 10, 15, 100)) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(percentiles.percentile(0), equalTo(Double.NaN)); - assertThat(percentiles.percentile(10), equalTo(Double.NaN)); - assertThat(percentiles.percentile(15), equalTo(Double.NaN)); - assertThat(percentiles.percentile(100), equalTo(Double.NaN)); + assertResponse( + prepareSearch("idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(0, 10, 15, 100)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + Percentiles percentiles = response.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(percentiles.percentile(0), equalTo(Double.NaN)); + assertThat(percentiles.percentile(10), equalTo(Double.NaN)); + assertThat(percentiles.percentile(15), equalTo(Double.NaN)); + assertThat(percentiles.percentile(100), equalTo(Double.NaN)); + } + ); } @Override public void testSingleValuedField() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final 
Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue); + } + ); } @Override public void testSingleValuedFieldGetProperty() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Percentiles percentiles = global.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)) + ), + response -> { + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + Percentiles percentiles = global.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.getName(), equalTo("percentiles")); + assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); + } + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue); + } + ); } @Override public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("value") - 
.script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + } + ); } @Override @@ -211,64 +225,72 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + } + ); } @Override public void testMultiValuedField() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("values").percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues, maxValues); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("values").percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues, maxValues); + } + ); } @Override public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + 
.addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + } + ); } public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, -maxValues, -minValues); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, -maxValues, -minValues); + } + ); } @Override @@ -276,32 +298,36 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - randomCompression(percentiles("percentiles")).field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts) - ) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + } + ); } @Override public void testScriptSingleValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()); final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue, maxValue); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles 
percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue, maxValue); + } + ); } @Override @@ -312,14 +338,16 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1); + } + ); } @Override @@ -327,14 +355,16 @@ public void testScriptMultiValued() throws Exception { final double[] pcts = randomPercentiles(); Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) - .get(); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)), + response -> { + assertHitCount(response, 10); - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues, maxValues); + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues, maxValues); + } + ); } @Override @@ -342,78 +372,83 @@ public void testScriptMultiValuedWithParams() throws Exception { Script script = AggregationTestScriptsPlugin.DECREMENT_ALL_VALUES; final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) - .get(); - - assertHitCount(searchResponse, 10); - - final Percentiles percentiles = searchResponse.getAggregations().get("percentiles"); - assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)), + response -> { + assertHitCount(response, 10); + + final Percentiles percentiles = response.getAggregations().get("percentiles"); + assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1); + } + ); } public void testOrderBySubAggregation() { boolean asc = randomBoolean(); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value") - .interval(2L) - .subAggregation(randomCompression(percentiles("percentiles").field("value").percentiles(99))) - .order(BucketOrder.aggregation("percentiles", "99", asc)) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Histogram 
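// Illustrative note, not part of this change set: testOrderBySubAggregation checks an
// ordering invariant rather than exact values. Walking the histogram buckets in returned
// order, each bucket's p99 must be >= its predecessor when asc is true (<= when false), with
// previous seeded to -/+ infinity so the first bucket always passes.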
histo = searchResponse.getAggregations().get("histo"); - double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - for (Histogram.Bucket bucket : histo.getBuckets()) { - Percentiles percentiles = bucket.getAggregations().get("percentiles"); - double p99 = percentiles.percentile(99); - if (asc) { - assertThat(p99, greaterThanOrEqualTo(previous)); - } else { - assertThat(p99, lessThanOrEqualTo(previous)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation(randomCompression(percentiles("percentiles").field("value").percentiles(99))) + .order(BucketOrder.aggregation("percentiles", "99", asc)) + ), + response -> { + assertHitCount(response, 10); + + Histogram histo = response.getAggregations().get("histo"); + double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; + for (Histogram.Bucket bucket : histo.getBuckets()) { + Percentiles percentiles = bucket.getAggregations().get("percentiles"); + double p99 = percentiles.percentile(99); + if (asc) { + assertThat(p99, greaterThanOrEqualTo(previous)); + } else { + assertThat(p99, lessThanOrEqualTo(previous)); + } + previous = p99; + } } - previous = p99; - } + ); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation( - percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value") + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value") + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Percentiles percentiles = filter.getAggregations().get("percentiles"); - assertThat(percentiles, notNullValue()); - assertThat(percentiles.percentile(99), equalTo(Double.NaN)); - - } + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + 
Percentiles percentiles = filter.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.percentile(99), equalTo(Double.NaN)); + } + } + ); } /** @@ -427,8 +462,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -442,14 +477,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentiles("foo").field("d") - .percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentiles("foo").field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -461,14 +496,14 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - percentiles("foo").field("d") - .percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + percentiles("foo").field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -480,8 +515,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d").percentiles(50.0)).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d").percentiles(50.0))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index ab9ab37894f70..d878dc981b17f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.document.DocumentField; import 
org.elasticsearch.common.settings.Settings; @@ -65,6 +64,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xcontent.XContentFactory.smileBuilder; import static org.elasticsearch.xcontent.XContentFactory.yamlBuilder; @@ -156,8 +156,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 50; i++) { builders.add( - client().prepareIndex("idx") - .setId(Integer.toString(i)) + prepareIndex("idx").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field(TERMS_AGGS_FIELD, "val" + (i / 10)) @@ -171,48 +170,39 @@ public void setupSuiteScopeCluster() throws Exception { } builders.add( - client().prepareIndex("field-collapsing") - .setId("1") + prepareIndex("field-collapsing").setId("1") .setSource(jsonBuilder().startObject().field("group", "a").field("text", "term x y z b").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("2") + prepareIndex("field-collapsing").setId("2") .setSource(jsonBuilder().startObject().field("group", "a").field("text", "term x y z n rare").field("value", 1).endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("3") + prepareIndex("field-collapsing").setId("3") .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x y z term").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("4") + prepareIndex("field-collapsing").setId("4") .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x y term").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("5") + prepareIndex("field-collapsing").setId("5") .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x term").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("6") + prepareIndex("field-collapsing").setId("6") .setSource(jsonBuilder().startObject().field("group", "b").field("text", "term rare").field("value", 3).endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("7") + prepareIndex("field-collapsing").setId("7") .setSource(jsonBuilder().startObject().field("group", "c").field("text", "x y z term").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("8") + prepareIndex("field-collapsing").setId("8") .setSource(jsonBuilder().startObject().field("group", "c").field("text", "x y term b").endObject()) ); builders.add( - client().prepareIndex("field-collapsing") - .setId("9") + prepareIndex("field-collapsing").setId("9") .setSource(jsonBuilder().startObject().field("group", "c").field("text", "rare x term").field("value", 2).endObject()) ); @@ -227,12 +217,11 @@ public void setupSuiteScopeCluster() throws Exception { } builder.endArray().endObject(); - builders.add(client().prepareIndex("articles").setSource(builder)); + builders.add(prepareIndex("articles").setSource(builder)); } builders.add( - client().prepareIndex("articles") - .setId("1") + prepareIndex("articles").setId("1") .setSource( jsonBuilder().startObject() .field("title", "title 1") @@ -275,8 +264,7 @@ public void 
setupSuiteScopeCluster() throws Exception { ) ); builders.add( - client().prepareIndex("articles") - .setId("2") + prepareIndex("articles").setId("2") .setSource( jsonBuilder().startObject() .field("title", "title 2") @@ -311,358 +299,365 @@ private String key(Terms.Bucket bucket) { } public void testBasics() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - long higestSortValue = 0; - for (int i = 0; i < 5; i++) { - Terms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(3)); - higestSortValue += 10; - assertThat((Long) hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); - assertThat((Long) hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); - assertThat((Long) hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); - - assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + long higestSortValue = 0; + for (int i = 0; i < 5; i++) { + Terms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(3)); + higestSortValue += 10; + assertThat((Long) hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); + assertThat((Long) hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); + assertThat((Long) hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); + + assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); + } + } + ); } public void testIssue11119() throws Exception { // Test that top_hits aggregation is fed scores if query results size=0 - SearchResponse response = prepareSearch("field-collapsing").setSize(0) - .setQuery(matchQuery("text", "x y z")) - .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo(8L)); - assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); - Terms terms = response.getAggregations().get("terms"); - 
assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); - - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - float bestScore = Float.MAX_VALUE; - for (int h = 0; h < hits.getHits().length; h++) { - float score = hits.getAt(h).getScore(); - assertThat(score, lessThanOrEqualTo(bestScore)); - assertThat(score, greaterThan(0f)); - bestScore = hits.getAt(h).getScore(); + assertNoFailuresAndResponse( + prepareSearch("field-collapsing").setSize(0) + .setQuery(matchQuery("text", "x y z")) + .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + assertThat(response.getHits().getHits().length, equalTo(0)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + float bestScore = Float.MAX_VALUE; + for (int h = 0; h < hits.getHits().length; h++) { + float score = hits.getAt(h).getScore(); + assertThat(score, lessThanOrEqualTo(bestScore)); + assertThat(score, greaterThan(0f)); + bestScore = hits.getAt(h).getScore(); + } + } } - } + ); // Also check that min_score setting works when size=0 // (technically not a test of top_hits but implementation details are // tied up with the need to feed scores into the agg tree even when // users don't want ranked set of query results.) 
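// ------------------------------------------------------------------------------------------
// Aside on the migration pattern in these hunks: each `SearchResponse r = ... .get()` plus
// manual assertions is replaced by the consumer-based helpers imported from
// ElasticsearchAssertions (assertResponse / assertNoFailuresAndResponse). A minimal sketch of
// what such a helper can look like; the body below is an illustrative assumption for
// exposition, not the actual ElasticsearchAssertions implementation.

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;

import java.util.function.Consumer;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

public final class ResponseAssertSketch {
    public static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
        SearchResponse response = request.get();
        try {
            // fail fast on shard-level failures before running the caller's assertions
            assertNoFailures(response);
            // the caller inspects hits and aggregations while the response is still retained
            consumer.accept(response);
        } finally {
            // release the ref-counted response even when an assertion throws, which the old
            // `SearchResponse r = ... .get()` pattern did not guarantee
            response.decRef();
        }
    }
}
// ------------------------------------------------------------------------------------------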
- response = prepareSearch("field-collapsing").setSize(0) - .setMinScore(0.0001f) - .setQuery(matchQuery("text", "x y z")) - .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group")) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo(8L)); - assertThat(response.getHits().getHits().length, equalTo(0)); - assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); - terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); + assertNoFailuresAndResponse( + prepareSearch("field-collapsing").setSize(0) + .setMinScore(0.0001f) + .setQuery(matchQuery("text", "x y z")) + .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(8L)); + assertThat(response.getHits().getHits().length, equalTo(0)); + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + } + ); } public void testBreadthFirstWithScoreNeeded() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").size(3)) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - Terms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(3)); - - assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").size(3)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + Terms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(3)); + + assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); + } + } + ); } public void testBreadthFirstWithAggOrderAndScoreNeeded() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .collectMode(SubAggCollectionMode.BREADTH_FIRST) 
- .field(TERMS_AGGS_FIELD) - .order(BucketOrder.aggregation("max", false)) - .subAggregation(max("max").field(SORT_FIELD)) - .subAggregation(topHits("hits").size(3)) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - int id = 4; - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("val" + id)); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(3)); - - assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); - id--; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field(TERMS_AGGS_FIELD) + .order(BucketOrder.aggregation("max", false)) + .subAggregation(max("max").field(SORT_FIELD)) + .subAggregation(topHits("hits").size(3)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + int id = 4; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + id)); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(3)); + + assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(5)); + id--; + } + } + ); } public void testBasicsGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(topHits("hits"))) - .get(); - - assertNoFailures(searchResponse); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - TopHits topHits = global.getAggregations().get("hits"); - assertThat(topHits, notNullValue()); - assertThat(topHits.getName(), equalTo("hits")); - assertThat((TopHits) ((InternalAggregation) global).getProperty("hits"), sameInstance(topHits)); - + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(topHits("hits"))), + response -> { + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + TopHits topHits = global.getAggregations().get("hits"); + assertThat(topHits, notNullValue()); + assertThat(topHits.getName(), equalTo("hits")); + assertThat((TopHits) ((InternalAggregation) global).getProperty("hits"), sameInstance(topHits)); + } + ); } public void testPagination() throws Exception { int size = randomIntBetween(1, 10); int from = randomIntBetween(0, 10); - SearchResponse 
response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).from(from).size(size)) - ).get(); - assertNoFailures(response); - - SearchResponse control = prepareSearch("idx").setFrom(from) - .setSize(size) - .setPostFilter(QueryBuilders.termQuery(TERMS_AGGS_FIELD, "val0")) - .addSort(SORT_FIELD, SortOrder.DESC) - .get(); - assertNoFailures(control); - SearchHits controlHits = control.getHits(); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - Terms.Bucket bucket = terms.getBucketByKey("val0"); - assertThat(bucket, notNullValue()); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(controlHits.getTotalHits().value)); - assertThat(hits.getHits().length, equalTo(controlHits.getHits().length)); - for (int i = 0; i < hits.getHits().length; i++) { - logger.info( - "{}: top_hits: [{}][{}] control: [{}][{}]", - i, - hits.getAt(i).getId(), - hits.getAt(i).getSortValues()[0], - controlHits.getAt(i).getId(), - controlHits.getAt(i).getSortValues()[0] - ); - assertThat(hits.getAt(i).getId(), equalTo(controlHits.getAt(i).getId())); - assertThat(hits.getAt(i).getSortValues()[0], equalTo(controlHits.getAt(i).getSortValues()[0])); - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).from(from).size(size)) + ), + response -> { + assertNoFailuresAndResponse( + prepareSearch("idx").setFrom(from) + .setSize(size) + .setPostFilter(QueryBuilders.termQuery(TERMS_AGGS_FIELD, "val0")) + .addSort(SORT_FIELD, SortOrder.DESC), + control -> { + assertNoFailures(control); + SearchHits controlHits = control.getHits(); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + Terms.Bucket bucket = terms.getBucketByKey("val0"); + assertThat(bucket, notNullValue()); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(controlHits.getTotalHits().value)); + assertThat(hits.getHits().length, equalTo(controlHits.getHits().length)); + for (int i = 0; i < hits.getHits().length; i++) { + logger.info( + "{}: top_hits: [{}][{}] control: [{}][{}]", + i, + hits.getAt(i).getId(), + hits.getAt(i).getSortValues()[0], + controlHits.getAt(i).getId(), + controlHits.getAt(i).getSortValues()[0] + ); + assertThat(hits.getAt(i).getId(), equalTo(controlHits.getAt(i).getId())); + assertThat(hits.getAt(i).getSortValues()[0], equalTo(controlHits.getAt(i).getSortValues()[0])); + } + } + ); + } + ); } public void testSortByBucket() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .order(BucketOrder.aggregation("max_sort", false)) - 
.subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).trackScores(true)) - .subAggregation(max("max_sort").field(SORT_FIELD)) - ).get(); - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - long higestSortValue = 50; - int currentBucket = 4; - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(key(bucket), equalTo("val" + currentBucket--)); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(3)); - assertThat(hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); - assertThat(hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); - assertThat(hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); - Max max = bucket.getAggregations().get("max_sort"); - assertThat(max.value(), equalTo(((Long) higestSortValue).doubleValue())); - higestSortValue -= 10; - } + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .order(BucketOrder.aggregation("max_sort", false)) + .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).trackScores(true)) + .subAggregation(max("max_sort").field(SORT_FIELD)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + long higestSortValue = 50; + int currentBucket = 4; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(key(bucket), equalTo("val" + currentBucket--)); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(3)); + assertThat(hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); + assertThat(hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); + assertThat(hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); + Max max = bucket.getAggregations().get("max_sort"); + assertThat(max.value(), equalTo(((Long) higestSortValue).doubleValue())); + higestSortValue -= 10; + } + } + ); } public void testFieldCollapsing() throws Exception { - SearchResponse response = prepareSearch("field-collapsing").setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(matchQuery("text", "term rare")) - .addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field("group") - .order(BucketOrder.aggregation("max_score", false)) - .subAggregation(topHits("hits").size(1)) - .subAggregation(max("max_score").field("value")) - ) - .get(); - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); - - Iterator bucketIterator = terms.getBuckets().iterator(); - Terms.Bucket bucket = bucketIterator.next(); - assertThat(key(bucket), equalTo("b")); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = 
topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(4L)); - assertThat(hits.getHits().length, equalTo(1)); - assertThat(hits.getAt(0).getId(), equalTo("6")); - - bucket = bucketIterator.next(); - assertThat(key(bucket), equalTo("c")); - topHits = bucket.getAggregations().get("hits"); - hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(3L)); - assertThat(hits.getHits().length, equalTo(1)); - assertThat(hits.getAt(0).getId(), equalTo("9")); - - bucket = bucketIterator.next(); - assertThat(key(bucket), equalTo("a")); - topHits = bucket.getAggregations().get("hits"); - hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(2L)); - assertThat(hits.getHits().length, equalTo(1)); - assertThat(hits.getAt(0).getId(), equalTo("2")); + assertNoFailuresAndResponse( + prepareSearch("field-collapsing").setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery(matchQuery("text", "term rare")) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field("group") + .order(BucketOrder.aggregation("max_score", false)) + .subAggregation(topHits("hits").size(1)) + .subAggregation(max("max_score").field("value")) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + Iterator bucketIterator = terms.getBuckets().iterator(); + Terms.Bucket bucket = bucketIterator.next(); + assertThat(key(bucket), equalTo("b")); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(4L)); + assertThat(hits.getHits().length, equalTo(1)); + assertThat(hits.getAt(0).getId(), equalTo("6")); + + bucket = bucketIterator.next(); + assertThat(key(bucket), equalTo("c")); + topHits = bucket.getAggregations().get("hits"); + hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(3L)); + assertThat(hits.getHits().length, equalTo(1)); + assertThat(hits.getAt(0).getId(), equalTo("9")); + + bucket = bucketIterator.next(); + assertThat(key(bucket), equalTo("a")); + topHits = bucket.getAggregations().get("hits"); + hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(2L)); + assertThat(hits.getHits().length, equalTo(1)); + assertThat(hits.getAt(0).getId(), equalTo("2")); + } + ); } public void testFetchFeatures() { final boolean seqNoAndTerm = randomBoolean(); - SearchResponse response = prepareSearch("idx").setQuery(matchQuery("text", "text").queryName("test")) - .addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").size(1) - .highlighter(new HighlightBuilder().field("text")) - .explain(true) - .storedField("text") - .docValueField("field1") - .fetchField("field2") - .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .fetchSource("text", null) - .version(true) - .seqNoAndPrimaryTerm(seqNoAndTerm) - ) - ) - .get(); - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (Terms.Bucket bucket : terms.getBuckets()) { - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - 
assertThat(hits.getHits().length, equalTo(1)); - - SearchHit hit = hits.getAt(0); - HighlightField highlightField = hit.getHighlightFields().get("text"); - assertThat(highlightField.getFragments().length, equalTo(1)); - assertThat(highlightField.getFragments()[0].string(), equalTo("some text to entertain")); - - Explanation explanation = hit.getExplanation(); - assertThat(explanation.toString(), containsString("text:text")); - - long version = hit.getVersion(); - assertThat(version, equalTo(1L)); - - if (seqNoAndTerm) { - assertThat(hit.getSeqNo(), greaterThanOrEqualTo(0L)); - assertThat(hit.getPrimaryTerm(), greaterThanOrEqualTo(1L)); - } else { - assertThat(hit.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); - assertThat(hit.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); + assertNoFailuresAndResponse( + prepareSearch("idx").setQuery(matchQuery("text", "text").queryName("test")) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").size(1) + .highlighter(new HighlightBuilder().field("text")) + .explain(true) + .storedField("text") + .docValueField("field1") + .fetchField("field2") + .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .fetchSource("text", null) + .version(true) + .seqNoAndPrimaryTerm(seqNoAndTerm) + ) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (Terms.Bucket bucket : terms.getBuckets()) { + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(1)); + + SearchHit hit = hits.getAt(0); + HighlightField highlightField = hit.getHighlightFields().get("text"); + assertThat(highlightField.fragments().length, equalTo(1)); + assertThat(highlightField.fragments()[0].string(), equalTo("some text to entertain")); + + long version = hit.getVersion(); + assertThat(version, equalTo(1L)); + + if (seqNoAndTerm) { + assertThat(hit.getSeqNo(), greaterThanOrEqualTo(0L)); + assertThat(hit.getPrimaryTerm(), greaterThanOrEqualTo(1L)); + } else { + assertThat(hit.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); + assertThat(hit.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); + } + + assertThat(hit.getMatchedQueries()[0], equalTo("test")); + + DocumentField field1 = hit.field("field1"); + assertThat(field1.getValue(), equalTo(5L)); + + DocumentField field2 = hit.field("field2"); + assertThat(field2.getValue(), equalTo(2.71f)); + + assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); + + field2 = hit.field("script"); + assertThat(field2.getValue().toString(), equalTo("5")); + + assertThat(hit.getSourceAsMap().size(), equalTo(1)); + assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); + } } - - assertThat(hit.getMatchedQueries()[0], equalTo("test")); - - DocumentField field1 = hit.field("field1"); - assertThat(field1.getValue(), equalTo(5L)); - - DocumentField field2 = hit.field("field2"); - assertThat(field2.getValue(), equalTo(2.71f)); - - assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); - - field2 = hit.field("script"); - assertThat(field2.getValue().toString(), 
equalTo("5")); - - assertThat(hit.getSourceAsMap().size(), equalTo(1)); - assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); - } + ); } public void testInvalidSortField() throws Exception { @@ -679,194 +674,197 @@ public void testInvalidSortField() throws Exception { } public void testEmptyIndex() throws Exception { - SearchResponse response = prepareSearch("empty").addAggregation(topHits("hits")).get(); - assertNoFailures(response); - - TopHits hits = response.getAggregations().get("hits"); - assertThat(hits, notNullValue()); - assertThat(hits.getName(), equalTo("hits")); - assertThat(hits.getHits().getTotalHits().value, equalTo(0L)); + assertNoFailuresAndResponse(prepareSearch("empty").addAggregation(topHits("hits")), response -> { + TopHits hits = response.getAggregations().get("hits"); + assertThat(hits, notNullValue()); + assertThat(hits.getName(), equalTo("hits")); + assertThat(hits.getHits().getTotalHits().value, equalTo(0L)); + }); } public void testTrackScores() throws Exception { boolean[] trackScores = new boolean[] { true, false }; for (boolean trackScore : trackScores) { logger.info("Track score={}", trackScore); - SearchResponse response = prepareSearch("field-collapsing").setQuery(matchQuery("text", "term rare")) - .addAggregation( - terms("terms").field("group") - .subAggregation(topHits("hits").trackScores(trackScore).size(1).sort("_index", SortOrder.DESC)) - ) - .get(); - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(3)); - - Terms.Bucket bucket = terms.getBucketByKey("a"); - assertThat(key(bucket), equalTo("a")); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - - bucket = terms.getBucketByKey("b"); - assertThat(key(bucket), equalTo("b")); - topHits = bucket.getAggregations().get("hits"); - hits = topHits.getHits(); - assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - - bucket = terms.getBucketByKey("c"); - assertThat(key(bucket), equalTo("c")); - topHits = bucket.getAggregations().get("hits"); - hits = topHits.getHits(); - assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); - assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + assertNoFailuresAndResponse( + prepareSearch("field-collapsing").setQuery(matchQuery("text", "term rare")) + .addAggregation( + terms("terms").field("group") + .subAggregation(topHits("hits").trackScores(trackScore).size(1).sort("_index", SortOrder.DESC)) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + Terms.Bucket bucket = terms.getBucketByKey("a"); + assertThat(key(bucket), equalTo("a")); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getMaxScore(), trackScore ? 
not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + + bucket = terms.getBucketByKey("b"); + assertThat(key(bucket), equalTo("b")); + topHits = bucket.getAggregations().get("hits"); + hits = topHits.getHits(); + assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + + bucket = terms.getBucketByKey("c"); + assertThat(key(bucket), equalTo("c")); + topHits = bucket.getAggregations().get("hits"); + hits = topHits.getHits(); + assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + assertThat(hits.getAt(0).getScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN)); + } + ); } } public void testTopHitsInNestedSimple() throws Exception { - SearchResponse searchResponse = prepareSearch("articles").setQuery(matchQuery("title", "title")) - .addAggregation( - nested("to-comments", "comments").subAggregation( - terms("users").field("comments.user").subAggregation(topHits("top-comments").sort("comments.date", SortOrder.ASC)) - ) - ) - .get(); - - Nested nested = searchResponse.getAggregations().get("to-comments"); - assertThat(nested.getDocCount(), equalTo(4L)); - - Terms terms = nested.getAggregations().get("users"); - Terms.Bucket bucket = terms.getBucketByKey("a"); - assertThat(bucket.getDocCount(), equalTo(1L)); - TopHits topHits = bucket.getAggregations().get("top-comments"); - SearchHits searchHits = topHits.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(1L)); - assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(1)); - - bucket = terms.getBucketByKey("b"); - assertThat(bucket.getDocCount(), equalTo(2L)); - topHits = bucket.getAggregations().get("top-comments"); - searchHits = topHits.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(2L)); - assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(2)); - assertThat(searchHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(extractValue("date", searchHits.getAt(1).getSourceAsMap()), equalTo(3)); - - bucket = terms.getBucketByKey("c"); - assertThat(bucket.getDocCount(), equalTo(1L)); - topHits = bucket.getAggregations().get("top-comments"); - searchHits = topHits.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(1L)); - assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(4)); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery(matchQuery("title", "title")) + .addAggregation( + nested("to-comments", "comments").subAggregation( + terms("users").field("comments.user").subAggregation(topHits("top-comments").sort("comments.date", SortOrder.ASC)) + ) + ), + response -> { + Nested nested = 
response.getAggregations().get("to-comments"); + assertThat(nested.getDocCount(), equalTo(4L)); + + Terms terms = nested.getAggregations().get("users"); + Terms.Bucket bucket = terms.getBucketByKey("a"); + assertThat(bucket.getDocCount(), equalTo(1L)); + TopHits topHits = bucket.getAggregations().get("top-comments"); + SearchHits searchHits = topHits.getHits(); + assertThat(searchHits.getTotalHits().value, equalTo(1L)); + assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(1)); + + bucket = terms.getBucketByKey("b"); + assertThat(bucket.getDocCount(), equalTo(2L)); + topHits = bucket.getAggregations().get("top-comments"); + searchHits = topHits.getHits(); + assertThat(searchHits.getTotalHits().value, equalTo(2L)); + assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(2)); + assertThat(searchHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(extractValue("date", searchHits.getAt(1).getSourceAsMap()), equalTo(3)); + + bucket = terms.getBucketByKey("c"); + assertThat(bucket.getDocCount(), equalTo(1L)); + topHits = bucket.getAggregations().get("top-comments"); + searchHits = topHits.getHits(); + assertThat(searchHits.getTotalHits().value, equalTo(1L)); + assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(extractValue("date", searchHits.getAt(0).getSourceAsMap()), equalTo(4)); + } + ); } public void testTopHitsInSecondLayerNested() throws Exception { - SearchResponse searchResponse = prepareSearch("articles").setQuery(matchQuery("title", "title")) - .addAggregation( - nested("to-comments", "comments").subAggregation( - nested("to-reviewers", "comments.reviewers").subAggregation( - // Also need to sort on _doc because there are two reviewers with the same name - topHits("top-reviewers").sort("comments.reviewers.name", SortOrder.ASC).sort("_doc", SortOrder.DESC).size(7) - ) - ).subAggregation(topHits("top-comments").sort("comments.date", SortOrder.DESC).size(4)) - ) - .get(); - assertNoFailures(searchResponse); - - Nested toComments = searchResponse.getAggregations().get("to-comments"); - assertThat(toComments.getDocCount(), equalTo(4L)); - - TopHits topComments = toComments.getAggregations().get("top-comments"); - assertThat(topComments.getHits().getTotalHits().value, equalTo(4L)); - assertThat(topComments.getHits().getHits().length, equalTo(4)); - - assertThat(topComments.getHits().getAt(0).getId(), equalTo("2")); - assertThat(topComments.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topComments.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topComments.getHits().getAt(0).getNestedIdentity().getChild(), nullValue()); - - assertThat(topComments.getHits().getAt(1).getId(), equalTo("2")); - assertThat(topComments.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topComments.getHits().getAt(1).getNestedIdentity().getOffset(), 
equalTo(0)); - assertThat(topComments.getHits().getAt(1).getNestedIdentity().getChild(), nullValue()); - - assertThat(topComments.getHits().getAt(2).getId(), equalTo("1")); - assertThat(topComments.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topComments.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topComments.getHits().getAt(2).getNestedIdentity().getChild(), nullValue()); - - assertThat(topComments.getHits().getAt(3).getId(), equalTo("1")); - assertThat(topComments.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topComments.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topComments.getHits().getAt(3).getNestedIdentity().getChild(), nullValue()); - - Nested toReviewers = toComments.getAggregations().get("to-reviewers"); - assertThat(toReviewers.getDocCount(), equalTo(7L)); - - TopHits topReviewers = toReviewers.getAggregations().get("top-reviewers"); - assertThat(topReviewers.getHits().getTotalHits().value, equalTo(7L)); - assertThat(topReviewers.getHits().getHits().length, equalTo(7)); - - assertThat(topReviewers.getHits().getAt(0).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(0).getSourceAsMap()), equalTo("user a")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - assertThat(topReviewers.getHits().getAt(1).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(1).getSourceAsMap()), equalTo("user b")); - assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getOffset(), equalTo(1)); - - assertThat(topReviewers.getHits().getAt(2).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(2).getSourceAsMap()), equalTo("user c")); - assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - assertThat(topReviewers.getHits().getAt(3).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(3).getSourceAsMap()), equalTo("user c")); - assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getOffset(), equalTo(2)); - 
- assertThat(topReviewers.getHits().getAt(4).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(4).getSourceAsMap()), equalTo("user d")); - assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getOffset(), equalTo(1)); - - assertThat(topReviewers.getHits().getAt(5).getId(), equalTo("1")); - assertThat(extractValue("name", topReviewers.getHits().getAt(5).getSourceAsMap()), equalTo("user e")); - assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getOffset(), equalTo(2)); - - assertThat(topReviewers.getHits().getAt(6).getId(), equalTo("2")); - assertThat(extractValue("name", topReviewers.getHits().getAt(6).getSourceAsMap()), equalTo("user f")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); - assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery(matchQuery("title", "title")) + .addAggregation( + nested("to-comments", "comments").subAggregation( + nested("to-reviewers", "comments.reviewers").subAggregation( + // Also need to sort on _doc because there are two reviewers with the same name + topHits("top-reviewers").sort("comments.reviewers.name", SortOrder.ASC).sort("_doc", SortOrder.DESC).size(7) + ) + ).subAggregation(topHits("top-comments").sort("comments.date", SortOrder.DESC).size(4)) + ), + response -> { + Nested toComments = response.getAggregations().get("to-comments"); + assertThat(toComments.getDocCount(), equalTo(4L)); + + TopHits topComments = toComments.getAggregations().get("top-comments"); + assertThat(topComments.getHits().getTotalHits().value, equalTo(4L)); + assertThat(topComments.getHits().getHits().length, equalTo(4)); + + assertThat(topComments.getHits().getAt(0).getId(), equalTo("2")); + assertThat(topComments.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topComments.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topComments.getHits().getAt(0).getNestedIdentity().getChild(), nullValue()); + + assertThat(topComments.getHits().getAt(1).getId(), equalTo("2")); + assertThat(topComments.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topComments.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topComments.getHits().getAt(1).getNestedIdentity().getChild(), nullValue()); + + assertThat(topComments.getHits().getAt(2).getId(), equalTo("1")); + assertThat(topComments.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); 
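// ------------------------------------------------------------------------------------------
// Aside: the assertion blocks here walk SearchHit.NestedIdentity, which records where a
// nested hit sits inside its root document as a chain of (field, offset) pairs, for example
// comments[1].reviewers[2] for the third reviewer of the second comment. A small helper
// (an illustrative assumption, not part of this test) that renders the chain as such a path:

import org.elasticsearch.search.SearchHit;

public final class NestedPathSketch {
    public static String nestedPath(SearchHit.NestedIdentity identity) {
        StringBuilder path = new StringBuilder();
        for (SearchHit.NestedIdentity current = identity; current != null; current = current.getChild()) {
            if (path.length() > 0) {
                path.append('.');
            }
            // getField() returns a Text value; string() unwraps it, as in the assertions above
            path.append(current.getField().string()).append('[').append(current.getOffset()).append(']');
        }
        return path.toString();
    }
}
// e.g. nestedPath(searchHits.getAt(0).getNestedIdentity()) might yield "comments[1].reviewers[2]"
// ------------------------------------------------------------------------------------------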
+ assertThat(topComments.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topComments.getHits().getAt(2).getNestedIdentity().getChild(), nullValue()); + + assertThat(topComments.getHits().getAt(3).getId(), equalTo("1")); + assertThat(topComments.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topComments.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topComments.getHits().getAt(3).getNestedIdentity().getChild(), nullValue()); + + Nested toReviewers = toComments.getAggregations().get("to-reviewers"); + assertThat(toReviewers.getDocCount(), equalTo(7L)); + + TopHits topReviewers = toReviewers.getAggregations().get("top-reviewers"); + assertThat(topReviewers.getHits().getTotalHits().value, equalTo(7L)); + assertThat(topReviewers.getHits().getHits().length, equalTo(7)); + + assertThat(topReviewers.getHits().getAt(0).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(0).getSourceAsMap()), equalTo("user a")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + + assertThat(topReviewers.getHits().getAt(1).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(1).getSourceAsMap()), equalTo("user b")); + assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getOffset(), equalTo(1)); + + assertThat(topReviewers.getHits().getAt(2).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(2).getSourceAsMap()), equalTo("user c")); + assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getOffset(), equalTo(0)); + + assertThat(topReviewers.getHits().getAt(3).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(3).getSourceAsMap()), equalTo("user c")); + assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getOffset(), equalTo(2)); + + assertThat(topReviewers.getHits().getAt(4).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(4).getSourceAsMap()), equalTo("user d")); + assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getField().string(), 
equalTo("comments")); + assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getOffset(), equalTo(1)); + + assertThat(topReviewers.getHits().getAt(5).getId(), equalTo("1")); + assertThat(extractValue("name", topReviewers.getHits().getAt(5).getSourceAsMap()), equalTo("user e")); + assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getOffset(), equalTo(2)); + + assertThat(topReviewers.getHits().getAt(6).getId(), equalTo("2")); + assertThat(extractValue("name", topReviewers.getHits().getAt(6).getSourceAsMap()), equalTo("user f")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers")); + assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); } public void testNestedFetchFeatures() { @@ -875,96 +873,102 @@ public void testNestedFetchFeatures() { matchQuery("comments.message", "comment") ).highlighterType(hlType); - SearchResponse searchResponse = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test"), ScoreMode.Avg) - ) - .addAggregation( - nested("to-comments", "comments").subAggregation( - topHits("top-comments").size(1) - .highlighter(new HighlightBuilder().field(hlField)) - .explain(true) - .docValueField("comments.user") - .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .fetchSource("comments.message", null) - .version(true) - .sort("comments.date", SortOrder.ASC) - ) + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test"), ScoreMode.Avg) ) - .get(); - assertHitCount(searchResponse, 2); - Nested nested = searchResponse.getAggregations().get("to-comments"); - assertThat(nested.getDocCount(), equalTo(4L)); - - SearchHits hits = ((TopHits) nested.getAggregations().get("top-comments")).getHits(); - assertThat(hits.getTotalHits().value, equalTo(4L)); - SearchHit searchHit = hits.getAt(0); - assertThat(searchHit.getId(), equalTo("1")); - assertThat(searchHit.getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHit.getNestedIdentity().getOffset(), equalTo(0)); - - HighlightField highlightField = searchHit.getHighlightFields().get("comments.message"); - assertThat(highlightField.getFragments().length, equalTo(1)); - assertThat(highlightField.getFragments()[0].string(), equalTo("some comment")); - - // Can't explain nested hit with the main query, since both are in a different scopes, also the nested doc may not - // even have matched with the main query. 
- // If top_hits would have a query option then we can explain that query - Explanation explanation = searchHit.getExplanation(); - assertFalse(explanation.isMatch()); - - // Returns the version of the root document. Nested docs don't have a separate version - long version = searchHit.getVersion(); - assertThat(version, equalTo(1L)); - - assertThat(searchHit.getMatchedQueries(), arrayContaining("test")); - - DocumentField field = searchHit.field("comments.user"); - assertThat(field.getValue().toString(), equalTo("a")); - - field = searchHit.field("script"); - assertThat(field.getValue().toString(), equalTo("5")); - - assertThat(searchHit.getSourceAsMap().size(), equalTo(1)); - assertThat(extractValue("message", searchHit.getSourceAsMap()), equalTo("some comment")); + .addAggregation( + nested("to-comments", "comments").subAggregation( + topHits("top-comments").size(1) + .highlighter(new HighlightBuilder().field(hlField)) + .explain(true) + .docValueField("comments.user") + .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .fetchSource("comments.message", null) + .version(true) + .sort("comments.date", SortOrder.ASC) + ) + ), + response -> { + assertHitCount(response, 2); + Nested nested = response.getAggregations().get("to-comments"); + assertThat(nested.getDocCount(), equalTo(4L)); + + SearchHits hits = ((TopHits) nested.getAggregations().get("top-comments")).getHits(); + assertThat(hits.getTotalHits().value, equalTo(4L)); + SearchHit searchHit = hits.getAt(0); + assertThat(searchHit.getId(), equalTo("1")); + assertThat(searchHit.getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHit.getNestedIdentity().getOffset(), equalTo(0)); + + HighlightField highlightField = searchHit.getHighlightFields().get("comments.message"); + assertThat(highlightField.fragments().length, equalTo(1)); + assertThat(highlightField.fragments()[0].string(), equalTo("some comment")); + + // Can't explain nested hit with the main query, since both are in different scopes, and the nested doc may not + // even have matched with the main query. + // If top_hits would have a query option then we can explain that query + Explanation explanation = searchHit.getExplanation(); + assertFalse(explanation.isMatch()); + + // Returns the version of the root document.
Nested docs don't have a separate version + long version = searchHit.getVersion(); + assertThat(version, equalTo(1L)); + + assertThat(searchHit.getMatchedQueries(), arrayContaining("test")); + + DocumentField field = searchHit.field("comments.user"); + assertThat(field.getValue().toString(), equalTo("a")); + + field = searchHit.field("script"); + assertThat(field.getValue().toString(), equalTo("5")); + + assertThat(searchHit.getSourceAsMap().size(), equalTo(1)); + assertThat(extractValue("message", searchHit.getSourceAsMap()), equalTo("some comment")); + } + ); } public void testTopHitsInNested() throws Exception { - SearchResponse searchResponse = prepareSearch("articles").addAggregation( - histogram("dates").field("date") - .interval(5) - .subAggregation( - nested("to-comments", "comments").subAggregation( - topHits("comments").highlighter( - new HighlightBuilder().field( - new HighlightBuilder.Field("comments.message").highlightQuery(matchQuery("comments.message", "text")) - ) - ).sort("comments.id", SortOrder.ASC) + assertNoFailuresAndResponse( + prepareSearch("articles").addAggregation( + histogram("dates").field("date") + .interval(5) + .subAggregation( + nested("to-comments", "comments").subAggregation( + topHits("comments").highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("comments.message").highlightQuery(matchQuery("comments.message", "text")) + ) + ).sort("comments.id", SortOrder.ASC) + ) ) - ) - ).get(); - - Histogram histogram = searchResponse.getAggregations().get("dates"); - for (int i = 0; i < numArticles; i += 5) { - Histogram.Bucket bucket = histogram.getBuckets().get(i / 5); - assertThat(bucket.getDocCount(), equalTo(5L)); - - long numNestedDocs = 10 + (5 * i); - Nested nested = bucket.getAggregations().get("to-comments"); - assertThat(nested.getDocCount(), equalTo(numNestedDocs)); - - TopHits hits = nested.getAggregations().get("comments"); - SearchHits searchHits = hits.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(numNestedDocs)); - for (int j = 0; j < 3; j++) { - assertThat(searchHits.getAt(j).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(searchHits.getAt(j).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(extractValue("id", searchHits.getAt(j).getSourceAsMap()), equalTo(0)); - - HighlightField highlightField = searchHits.getAt(j).getHighlightFields().get("comments.message"); - assertThat(highlightField.getFragments().length, equalTo(1)); - assertThat(highlightField.getFragments()[0].string(), equalTo("some text")); + ), + response -> { + Histogram histogram = response.getAggregations().get("dates"); + for (int i = 0; i < numArticles; i += 5) { + Histogram.Bucket bucket = histogram.getBuckets().get(i / 5); + assertThat(bucket.getDocCount(), equalTo(5L)); + + long numNestedDocs = 10 + (5 * i); + Nested nested = bucket.getAggregations().get("to-comments"); + assertThat(nested.getDocCount(), equalTo(numNestedDocs)); + + TopHits hits = nested.getAggregations().get("comments"); + SearchHits searchHits = hits.getHits(); + assertThat(searchHits.getTotalHits().value, equalTo(numNestedDocs)); + for (int j = 0; j < 3; j++) { + assertThat(searchHits.getAt(j).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(searchHits.getAt(j).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(extractValue("id", searchHits.getAt(j).getSourceAsMap()), equalTo(0)); + + HighlightField highlightField = searchHits.getAt(j).getHighlightFields().get("comments.message"); + 
assertThat(highlightField.fragments().length, equalTo(1)); + assertThat(highlightField.fragments()[0].string(), equalTo("some text")); + } + } } - } + ); } public void testUseMaxDocInsteadOfSize() throws Exception { @@ -1037,33 +1041,34 @@ public void testTooHighResultWindow() throws Exception { } public void testNoStoredFields() throws Exception { - SearchResponse response = prepareSearch("idx").addAggregation( - terms("terms").executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").storedField("_none_")) - ).get(); - - assertNoFailures(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - assertThat(terms.getBuckets().size(), equalTo(5)); - - for (int i = 0; i < 5; i++) { - Terms.Bucket bucket = terms.getBucketByKey("val" + i); - assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("val" + i)); - assertThat(bucket.getDocCount(), equalTo(10L)); - TopHits topHits = bucket.getAggregations().get("hits"); - SearchHits hits = topHits.getHits(); - assertThat(hits.getTotalHits().value, equalTo(10L)); - assertThat(hits.getHits().length, equalTo(3)); - for (SearchHit hit : hits) { - assertThat(hit.getSourceAsMap(), nullValue()); - assertThat(hit.getId(), nullValue()); + assertNoFailuresAndResponse( + prepareSearch("idx").addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").storedField("_none_")) + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + for (int i = 0; i < 5; i++) { + Terms.Bucket bucket = terms.getBucketByKey("val" + i); + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(10L)); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + assertThat(hits.getTotalHits().value, equalTo(10L)); + assertThat(hits.getHits().length, equalTo(3)); + for (SearchHit hit : hits) { + assertThat(hit.getSourceAsMap(), nullValue()); + assertThat(hit.getId(), nullValue()); + } + } } - } + ); } /** @@ -1080,8 +1085,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -1095,15 +1100,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script field does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - topHits("foo").scriptField( - "bar", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()) + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + topHits("foo").scriptField( + "bar", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()) + ) ) - ) - .get(); - assertNoFailures(r); + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1115,17 +1120,17 @@ 
public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script sort does not get cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - topHits("foo").sort( - SortBuilders.scriptSort( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()), - ScriptSortType.STRING + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + topHits("foo").sort( + SortBuilders.scriptSort( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()), + ScriptSortType.STRING + ) ) ) - ) - .get(); - assertNoFailures(r); + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1137,12 +1142,15 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script field does not get cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - topHits("foo").scriptField("bar", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + topHits("foo").scriptField( + "bar", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()) + ) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1154,17 +1162,17 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script sort does not get cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - topHits("foo").sort( - SortBuilders.scriptSort( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()), - ScriptSortType.STRING + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + topHits("foo").sort( + SortBuilders.scriptSort( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()), + ScriptSortType.STRING + ) ) ) - ) - .get(); - assertNoFailures(r); + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1176,8 +1184,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(topHits("foo")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(topHits("foo"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -1194,62 +1201,66 @@ public void testScriptCaching() throws Exception { public void testWithRescore() { // Rescore with default sort on relevancy (score) - { - SearchResponse response = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) - .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits"))) - .get(); - Terms terms = response.getAggregations().get("terms"); - for (Terms.Bucket bucket : terms.getBuckets()) { - TopHits topHits = bucket.getAggregations().get("hits"); - for (SearchHit hit : topHits.getHits().getHits()) { - assertThat(hit.getScore(), equalTo(4.0f)); + assertNoFailuresAndResponse( + prepareSearch("idx").addRescorer(new 
QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) + .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits"))), + response -> { + Terms terms = response.getAggregations().get("terms"); + for (Terms.Bucket bucket : terms.getBuckets()) { + TopHits topHits = bucket.getAggregations().get("hits"); + for (SearchHit hit : topHits.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(4.0f)); + } } } - } + ); - { - SearchResponse response = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) - .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.scoreSort()))) - .get(); - Terms terms = response.getAggregations().get("terms"); - for (Terms.Bucket bucket : terms.getBuckets()) { - TopHits topHits = bucket.getAggregations().get("hits"); - for (SearchHit hit : topHits.getHits().getHits()) { - assertThat(hit.getScore(), equalTo(4.0f)); + assertNoFailuresAndResponse( + prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) + .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.scoreSort()))), + response -> { + Terms terms = response.getAggregations().get("terms"); + for (Terms.Bucket bucket : terms.getBuckets()) { + TopHits topHits = bucket.getAggregations().get("hits"); + for (SearchHit hit : topHits.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(4.0f)); + } } } - } + ); // Rescore should not be applied if the sort order is not relevancy - { - SearchResponse response = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) + assertNoFailuresAndResponse( + prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) .addAggregation( terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.fieldSort("_index"))) - ) - .get(); - Terms terms = response.getAggregations().get("terms"); - for (Terms.Bucket bucket : terms.getBuckets()) { - TopHits topHits = bucket.getAggregations().get("hits"); - for (SearchHit hit : topHits.getHits().getHits()) { - assertThat(hit.getScore(), equalTo(Float.NaN)); + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + for (Terms.Bucket bucket : terms.getBuckets()) { + TopHits topHits = bucket.getAggregations().get("hits"); + for (SearchHit hit : topHits.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(Float.NaN)); + } } } - } + ); - { - SearchResponse response = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) + assertNoFailuresAndResponse( + prepareSearch("idx").addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) .addAggregation( terms("terms").field(TERMS_AGGS_FIELD) .subAggregation(topHits("hits").sort(SortBuilders.scoreSort()).sort(SortBuilders.fieldSort("_index"))) - ) - .get(); - Terms terms = response.getAggregations().get("terms"); - for (Terms.Bucket bucket : terms.getBuckets()) { - TopHits topHits = bucket.getAggregations().get("hits"); - for (SearchHit hit : topHits.getHits().getHits()) { - assertThat(hit.getScore(), equalTo(Float.NaN)); + ), + response -> { + Terms terms = response.getAggregations().get("terms"); + for (Terms.Bucket bucket : terms.getBuckets()) { + TopHits topHits = bucket.getAggregations().get("hits"); + for (SearchHit hit : topHits.getHits().getHits()) { + 
assertThat(hit.getScore(), equalTo(Float.NaN)); + } } } - } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index d122ee10d90a5..7c5ab6600e365 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -38,6 +37,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -49,8 +49,7 @@ public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); createIndex("idx_unmapped"); for (int i = 0; i < 10; i++) { - client().prepareIndex("idx") - .setId("" + i) + prepareIndex("idx").setId("" + i) .setSource( jsonBuilder().startObject().field("value", i + 1).startArray("values").value(i + 2).value(i + 3).endArray().endObject() ) @@ -67,132 +66,147 @@ protected Collection> nodePlugins() { } public void testUnmapped() throws Exception { - SearchResponse searchResponse = prepareSearch("idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(count("count").field("value")) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - ValueCount valueCount = searchResponse.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(0L)); + assertResponse(prepareSearch("idx_unmapped").setQuery(matchAllQuery()).addAggregation(count("count").field("value")), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + + ValueCount valueCount = response.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(0L)); + }); } public void testSingleValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(count("count").field("value")).get(); - - assertHitCount(searchResponse, 10); - - ValueCount valueCount = searchResponse.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(10L)); + assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(count("count").field("value")), response -> { + assertHitCount(response, 10); + + ValueCount valueCount = response.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(10L)); + }); } public void 
testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(count("count").field("value"))) - .get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - ValueCount valueCount = global.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(10L)); - assertThat((ValueCount) ((InternalAggregation) global).getProperty("count"), equalTo(valueCount)); - assertThat((double) ((InternalAggregation) global).getProperty("count.value"), equalTo(10d)); - assertThat((double) ((InternalAggregation) valueCount).getProperty("value"), equalTo(10d)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(global("global").subAggregation(count("count").field("value"))), + response -> { + + assertHitCount(response, 10); + + Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), equalTo("global")); + assertThat(global.getDocCount(), equalTo(10L)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().size(), equalTo(1)); + + ValueCount valueCount = global.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(10L)); + assertThat((ValueCount) ((InternalAggregation) global).getProperty("count"), equalTo(valueCount)); + assertThat((double) ((InternalAggregation) global).getProperty("count.value"), equalTo(10d)); + assertThat((double) ((InternalAggregation) valueCount).getProperty("value"), equalTo(10d)); + } + ); } public void testSingleValuedFieldPartiallyUnmapped() throws Exception { - SearchResponse searchResponse = prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()) - .addAggregation(count("count").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - ValueCount valueCount = searchResponse.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(10L)); + assertResponse( + prepareSearch("idx", "idx_unmapped").setQuery(matchAllQuery()).addAggregation(count("count").field("value")), + response -> { + assertHitCount(response, 10); + + ValueCount valueCount = response.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(10L)); + } + ); } public void testMultiValuedField() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(count("count").field("values")).get(); - - assertHitCount(searchResponse, 10); - - ValueCount valueCount = searchResponse.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(20L)); + 
assertResponse(prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(count("count").field("values")), response -> { + assertHitCount(response, 10); + + ValueCount valueCount = response.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(20L)); + }); } public void testSingleValuedScript() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ValueCount valueCount = searchResponse.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(10L)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap())) + ), + response -> { + assertHitCount(response, 10); + + ValueCount valueCount = response.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(10L)); + } + ); } public void testMultiValuedScript() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap())) - ) - .get(); - - assertHitCount(searchResponse, 10); - - ValueCount valueCount = searchResponse.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(20L)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + count("count").script( + new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap()) + ) + ), + response -> { + assertHitCount(response, 10); + + ValueCount valueCount = response.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(20L)); + } + ); } public void testSingleValuedScriptWithParams() throws Exception { Map params = Collections.singletonMap("field", "value"); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params))) - .get(); - - assertHitCount(searchResponse, 10); - - ValueCount valueCount = searchResponse.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(10L)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params)) + ), + response -> { + assertHitCount(response, 10); + + ValueCount valueCount = response.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(10L)); + } + ); } 
public void testMultiValuedScriptWithParams() throws Exception { Map params = Collections.singletonMap("field", "values"); - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params))) - .get(); - - assertHitCount(searchResponse, 10); - - ValueCount valueCount = searchResponse.getAggregations().get("count"); - assertThat(valueCount, notNullValue()); - assertThat(valueCount.getName(), equalTo("count")); - assertThat(valueCount.getValue(), equalTo(20L)); + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params)) + ), + response -> { + assertHitCount(response, 10); + + ValueCount valueCount = response.getAggregations().get("count"); + assertThat(valueCount, notNullValue()); + assertThat(valueCount.getName(), equalTo("count")); + assertThat(valueCount.getValue(), equalTo(20L)); + } + ); } /** @@ -206,8 +220,8 @@ public void testScriptCaching() throws Exception { ); indexRandom( true, - client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + prepareIndex("cache_test_idx").setId("2").setSource("s", 2) ); // Make sure we are starting with a clear cache @@ -221,12 +235,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - count("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + count("foo").field("d") + .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -238,13 +253,13 @@ public void testScriptCaching() throws Exception { ); // Test that a request using a deterministic script gets cached - r = prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - count("foo").field("d") - .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap())) - ) - .get(); - assertNoFailures(r); + assertNoFailures( + prepareSearch("cache_test_idx").setSize(0) + .addAggregation( + count("foo").field("d") + .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap())) + ) + ); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -256,8 +271,7 @@ public void testScriptCaching() throws Exception { ); // Ensure that non-scripted requests are cached as normal - r = prepareSearch("cache_test_idx").setSize(0).addAggregation(count("foo").field("d")).get(); - assertNoFailures(r); + assertNoFailures(prepareSearch("cache_test_idx").setSize(0).addAggregation(count("foo").field("d"))); assertThat( indicesAdmin().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), @@ -270,34 +284,35 @@ public void testScriptCaching() throws Exception { } public void 
testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value") - .order(BucketOrder.compound(BucketOrder.aggregation("filter>count", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(count("count").field("value"))) - ) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - ValueCount count = filter.getAggregations().get("count"); - assertThat(count, notNullValue()); - assertThat(count.value(), equalTo(0.0)); - - } + assertResponse( + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>count", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(count("count").field("value"))) + ), + response -> { + assertHitCount(response, 10); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + ValueCount count = filter.getAggregations().get("count"); + assertThat(count, notNullValue()); + assertThat(count.value(), equalTo(0.0)); + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java index 01b2d92de7d89..7509cf3815085 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java @@ -94,13 +94,9 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) + prepareIndex("idx").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)).endObject() + ) ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; @@ -109,8 +105,7 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) + prepareIndex("empty_bucket_idx").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) ); } @@ -475,7 +470,7 @@ public void testFieldIsntWrittenOutTwice() throws Exception { .field("@timestamp", "2018-07-08T08:07:00.599Z") .endObject(); // end::noformat - client().prepareIndex("foo_2").setSource(docBuilder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + prepareIndex("foo_2").setSource(docBuilder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); indicesAdmin().prepareRefresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index ba6522be755e9..16a570b6cd2fd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -134,7 +134,7 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int docs = 0; docs < numDocs; docs++) { - builders.add(client().prepareIndex("idx").setSource(newDocBuilder())); + builders.add(prepareIndex("idx").setSource(newDocBuilder())); } indexRandom(true, builders); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 0351734358968..bc518eb6c1294 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -85,8 +85,7 @@ public void setupSuiteScopeCluster() throws Exception { // creates 6 documents where the value of the field is 0, 1, 2, 3, // 3, 5 builders.add( - client().prepareIndex("idx_gappy") - .setId("" + i) + prepareIndex("idx_gappy").setId("" + i) .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i == 4 ? 
3 : i).endObject()) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java index 74acaf95bd24a..e4bb11247d230 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -27,6 +26,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyArray; @@ -43,15 +43,16 @@ public void testAllowPartialsWithRedState() throws Exception { final int numShards = cluster().numDataNodes() + 2; buildRedIndex(numShards); - SearchResponse searchResponse = prepareSearch().setSize(0).setAllowPartialSearchResults(true).get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat("Expect some shards failed", searchResponse.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards))); - assertThat("Expect no shards skipped", searchResponse.getSkippedShards(), equalTo(0)); - assertThat("Expect subset of shards successful", searchResponse.getSuccessfulShards(), lessThan(numShards)); - assertThat("Expected total shards", searchResponse.getTotalShards(), equalTo(numShards)); - for (ShardSearchFailure failure : searchResponse.getShardFailures()) { - assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); - } + assertResponse(prepareSearch().setSize(0).setAllowPartialSearchResults(true), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat("Expect some shards failed", response.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards))); + assertThat("Expect no shards skipped", response.getSkippedShards(), equalTo(0)); + assertThat("Expect subset of shards successful", response.getSuccessfulShards(), lessThan(numShards)); + assertThat("Expected total shards", response.getTotalShards(), equalTo(numShards)); + for (ShardSearchFailure failure : response.getShardFailures()) { + assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); + } + }); } public void testClusterAllowPartialsWithRedState() throws Exception { @@ -60,18 +61,19 @@ public void testClusterAllowPartialsWithRedState() throws Exception { setClusterDefaultAllowPartialResults(true); - SearchResponse searchResponse = prepareSearch().setSize(0).get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat("Expect some shards failed", searchResponse.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards))); - assertThat("Expect no shards skipped", searchResponse.getSkippedShards(), equalTo(0)); - assertThat("Expect subset of shards successful", searchResponse.getSuccessfulShards(), 
lessThan(numShards)); - assertThat("Expected total shards", searchResponse.getTotalShards(), equalTo(numShards)); - for (ShardSearchFailure failure : searchResponse.getShardFailures()) { - assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); - assertThat(failure.getCause().getStackTrace(), emptyArray()); - // We don't write out the entire, repetitive stacktrace in the reason - assertThat(failure.reason(), equalTo("org.elasticsearch.action.NoShardAvailableActionException" + System.lineSeparator())); - } + assertResponse(prepareSearch().setSize(0), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat("Expect some shards failed", response.getFailedShards(), allOf(greaterThan(0), lessThanOrEqualTo(numShards))); + assertThat("Expect no shards skipped", response.getSkippedShards(), equalTo(0)); + assertThat("Expect subset of shards successful", response.getSuccessfulShards(), lessThan(numShards)); + assertThat("Expected total shards", response.getTotalShards(), equalTo(numShards)); + for (ShardSearchFailure failure : response.getShardFailures()) { + assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); + assertThat(failure.getCause().getStackTrace(), emptyArray()); + // We don't write out the entire, repetitive stacktrace in the reason + assertThat(failure.reason(), equalTo("org.elasticsearch.action.NoShardAvailableActionException" + System.lineSeparator())); + } + }); } public void testDisallowPartialsWithRedState() throws Exception { @@ -107,7 +109,7 @@ private void buildRedIndex(int numShards) throws Exception { assertAcked(prepareCreate("test").setSettings(indexSettings(numShards, 0))); ensureGreen(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); + prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index eb6dd2f0767f1..97a400709cde7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -9,13 +9,13 @@ package org.elasticsearch.search.basic; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.greaterThanOrEqualTo; /** @@ -54,7 +54,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) if (createIndex) { createIndex("test"); } - client().prepareIndex("test").setId(id).setSource("field", "test").get(); + prepareIndex("test").setId(id).setSource("field", "test").get(); RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); // at least one shard should be successful when refreshing assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); @@ -66,32 +66,37 @@ private void 
searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) // first, verify that search normal search works assertHitCount(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "test")), 1); Client client = client(); - SearchResponse searchResponse = client.prepareSearch("test") - .setPreference(preference + Integer.toString(counter++)) - .setQuery(QueryBuilders.termQuery("field", "test")) - .get(); - if (searchResponse.getHits().getTotalHits().value != 1) { - refresh(); - SearchResponse searchResponseAfterRefresh = client.prepareSearch("test") - .setPreference(preference) - .setQuery(QueryBuilders.termQuery("field", "test")) - .get(); - logger.info( - "hits count mismatch on any shard search failed, post explicit refresh hits are {}", - searchResponseAfterRefresh.getHits().getTotalHits().value - ); - ensureGreen(); - SearchResponse searchResponseAfterGreen = client.prepareSearch("test") - .setPreference(preference) - .setQuery(QueryBuilders.termQuery("field", "test")) - .get(); - logger.info( - "hits count mismatch on any shard search failed, post explicit wait for green hits are {}", - searchResponseAfterGreen.getHits().getTotalHits().value - ); - assertHitCount(searchResponse, 1); - } - assertHitCount(searchResponse, 1); + assertResponse( + client.prepareSearch("test") + .setPreference(preference + Integer.toString(counter++)) + .setQuery(QueryBuilders.termQuery("field", "test")), + searchResponse -> { + if (searchResponse.getHits().getTotalHits().value != 1) { + refresh(); + assertResponse( + client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")), + searchResponseAfterRefresh -> { + logger.info( + "hits count mismatch on any shard search failed, post explicit refresh hits are {}", + searchResponseAfterRefresh.getHits().getTotalHits().value + ); + ensureGreen(); + assertResponse( + client.prepareSearch("test") + .setPreference(preference) + .setQuery(QueryBuilders.termQuery("field", "test")), + searchResponseAfterGreen -> logger.info( + "hits count mismatch on any shard search failed, post explicit wait for green hits are {}", + searchResponseAfterGreen.getHits().getTotalHits().value + ) + ); + } + ); + assertHitCount(searchResponse, 1); + } + assertHitCount(searchResponse, 1); + } + ); status = clusterAdmin().prepareHealth("test").get().getStatus(); internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 24df07217a5a2..26d81f672d650 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Priority; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESIntegTestCase; @@ -25,6 +24,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.formatShardStatus; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -47,8 +47,7 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw final int numDocs = between(10, 20); for (int i = 0; i < numDocs; i++) { indexBuilders.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("test", "value") @@ -74,33 +73,34 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw public void run() { try { while (stop.get() == false) { - SearchResponse sr = prepareSearch().setSize(numDocs).get(); - if (sr.getHits().getTotalHits().value != numDocs) { - // if we did not search all shards but had no serious failures that is potentially fine - // if only the hit-count is wrong. this can happen if the cluster-state is behind when the - // request comes in. It's a small window but a known limitation. - if (sr.getTotalShards() != sr.getSuccessfulShards() - && Stream.of(sr.getShardFailures()) - .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { - nonCriticalExceptions.add( - "Count is " - + sr.getHits().getTotalHits().value - + " but " - + numDocs - + " was expected. " - + formatShardStatus(sr) - ); - } else { - assertHitCount(sr, numDocs); + assertResponse(prepareSearch().setSize(numDocs), response -> { + if (response.getHits().getTotalHits().value != numDocs) { + // if we did not search all shards but had no serious failures that is potentially fine + // if only the hit-count is wrong. this can happen if the cluster-state is behind when the + // request comes in. It's a small window but a known limitation. + if (response.getTotalShards() != response.getSuccessfulShards() + && Stream.of(response.getShardFailures()) + .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { + nonCriticalExceptions.add( + "Count is " + + response.getHits().getTotalHits().value + + " but " + + numDocs + + " was expected. " + + formatShardStatus(response) + ); + } else { + assertHitCount(response, numDocs); + } } - } - final SearchHits sh = sr.getHits(); - assertThat( - "Expected hits to be the same size the actual hits array", - sh.getTotalHits().value, - equalTo((long) (sh.getHits().length)) - ); + final SearchHits sh = response.getHits(); + assertThat( + "Expected hits to be the same size as the actual hits array", + sh.getTotalHits().value, + equalTo((long) (sh.getHits().length)) + ); + }); // the more critical case is when the actual hit array has a different size than the // actual number of hits.
} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java index 6f701e956788b..6ebfc61830269 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -41,6 +40,7 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; public class SearchWithRandomExceptionsIT extends ESIntegTestCase { @@ -99,8 +99,7 @@ public void testRandomExceptions() throws IOException, InterruptedException, Exe boolean[] added = new boolean[numDocs]; for (int i = 0; i < numDocs; i++) { try { - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("" + i) + DocWriteResponse indexResponse = prepareIndex("test").setId("" + i) .setTimeout(TimeValue.timeValueSeconds(1)) .setSource("test", English.intToEnglish(i)) .get(); @@ -125,28 +124,36 @@ public void testRandomExceptions() throws IOException, InterruptedException, Exe NumShards test = getNumShards("test"); final int numSearches = scaledRandomIntBetween(100, 200); + final int finalNumCreated = numCreated; // we don't check anything here really just making sure we don't leave any open files or a broken index behind. for (int i = 0; i < numSearches; i++) { try { int docToQuery = between(0, numDocs - 1); int expectedResults = added[docToQuery] ? 
1 : 0; logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery)); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))) - .setSize(expectedResults) - .get(); - logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries); - if (searchResponse.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) { - assertResultsAndLogOnFailure(expectedResults, searchResponse); - } + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults), + response -> { + logger.info("Successful shards: [{}] numShards: [{}]", response.getSuccessfulShards(), test.numPrimaries); + if (response.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) { + assertResultsAndLogOnFailure(expectedResults, response); + } + } + ); // check match all - searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery()) - .setSize(numCreated) - .addSort("_id", SortOrder.ASC) - .get(); - logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries); - if (searchResponse.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) { - assertResultsAndLogOnFailure(numCreated, searchResponse); - } + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setSize(numCreated).addSort("_id", SortOrder.ASC), + response -> { + logger.info( + "Match all Successful shards: [{}] numShards: [{}]", + response.getSuccessfulShards(), + test.numPrimaries + ); + if (response.getSuccessfulShards() == test.numPrimaries && refreshFailed == false) { + assertResultsAndLogOnFailure(finalNumCreated, response); + } + } + ); } catch (SearchPhaseExecutionException ex) { logger.info("expected SearchPhaseException: [{}]", ex.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 54ad0cd7e0cff..33ef75b317e33 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -33,6 +32,7 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; public class SearchWithRandomIOExceptionsIT extends ESIntegTestCase { @@ -41,7 +41,6 @@ protected Collection> nodePlugins() { return Arrays.asList(MockFSIndexStore.TestPlugin.class); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99174") public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException { String mapping = Strings.toString( XContentFactory.jsonBuilder() @@ -83,7 +82,7 @@ public 
void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc numInitialDocs = between(10, 100); ensureGreen(); for (int i = 0; i < numInitialDocs; i++) { - client().prepareIndex("test").setId("init" + i).setSource("test", "init").get(); + prepareIndex("test").setId("init" + i).setSource("test", "init").get(); } indicesAdmin().prepareRefresh("test").execute().get(); indicesAdmin().prepareFlush("test").execute().get(); @@ -122,8 +121,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc for (int i = 0; i < numDocs; i++) { added[i] = false; try { - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId(Integer.toString(i)) + DocWriteResponse indexResponse = prepareIndex("test").setId(Integer.toString(i)) .setTimeout(TimeValue.timeValueSeconds(1)) .setSource("test", English.intToEnglish(i)) .get(); @@ -148,32 +146,39 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc refreshResponse.getTotalShards() ); final int numSearches = scaledRandomIntBetween(10, 20); + final int finalNumCreated = numCreated; + final int finalNumInitialDocs = numInitialDocs; // we don't check anything here really just making sure we don't leave any open files or a broken index behind. for (int i = 0; i < numSearches; i++) { try { int docToQuery = between(0, numDocs - 1); int expectedResults = added[docToQuery] ? 1 : 0; logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery)); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))) - .setSize(expectedResults) - .get(); - logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries); - if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) { - assertResultsAndLogOnFailure(expectedResults, searchResponse); - } + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults), + response -> { + logger.info("Successful shards: [{}] numShards: [{}]", response.getSuccessfulShards(), numShards.numPrimaries); + if (response.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) { + assertResultsAndLogOnFailure(expectedResults, response); + } + } + ); // check match all - searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery()) - .setSize(numCreated + numInitialDocs) - .addSort("_uid", SortOrder.ASC) - .get(); - logger.info( - "Match all Successful shards: [{}] numShards: [{}]", - searchResponse.getSuccessfulShards(), - numShards.numPrimaries + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchAllQuery()) + .setSize(numCreated + numInitialDocs) + .addSort("_uid", SortOrder.ASC), + response -> { + logger.info( + "Match all Successful shards: [{}] numShards: [{}]", + response.getSuccessfulShards(), + numShards.numPrimaries + ); + if (response.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) { + assertResultsAndLogOnFailure(finalNumCreated + finalNumInitialDocs, response); + } + } ); - if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) { - assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse); - } } catch (SearchPhaseExecutionException ex) { logger.info("SearchPhaseException: [{}]", ex.getMessage()); // if a scheduled refresh or flush fails all shards we see all shards failed here diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index 54abecb5a1905..c4b0346170949 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Priority; @@ -26,8 +25,10 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -55,15 +56,18 @@ public void testFailedSearchWithWrongQuery() throws Exception { assertThat(refreshResponse.getFailedShards(), equalTo(0)); for (int i = 0; i < 5; i++) { try { - SearchResponse searchResponse = client().search( - new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz"))) - ).actionGet(); - assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries)); - assertThat(searchResponse.getSuccessfulShards(), equalTo(0)); - assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries)); - fail("search should fail"); - } catch (ElasticsearchException e) { - assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class)); + assertResponse( + client().search(new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))), + response -> { + assertThat(response.getTotalShards(), equalTo(test.numPrimaries)); + assertThat(response.getSuccessfulShards(), equalTo(0)); + assertThat(response.getFailedShards(), equalTo(test.numPrimaries)); + fail("search should fail"); + } + ); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(((ElasticsearchException) e.getCause()).unwrapCause(), instanceOf(SearchPhaseExecutionException.class)); // all is well } } @@ -93,15 +97,18 @@ public void testFailedSearchWithWrongQuery() throws Exception { for (int i = 0; i < 5; i++) { try { - SearchResponse searchResponse = client().search( - new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz"))) - ).actionGet(); - assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries)); - assertThat(searchResponse.getSuccessfulShards(), equalTo(0)); - assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries)); - fail("search should fail"); - } catch (ElasticsearchException e) { - assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class)); + assertResponse( + client().search(new SearchRequest("test").source(new SearchSourceBuilder().query(new MatchQueryBuilder("foo", "biz")))), + response -> { + assertThat(response.getTotalShards(), 
equalTo(test.numPrimaries)); + assertThat(response.getSuccessfulShards(), equalTo(0)); + assertThat(response.getFailedShards(), equalTo(test.numPrimaries)); + fail("search should fail"); + } + ); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(((ElasticsearchException) e.getCause()).unwrapCause(), instanceOf(SearchPhaseExecutionException.class)); // all is well } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java index 54cff6efe3d17..e18c37aff783b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -43,6 +42,8 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -287,15 +288,15 @@ public void testSimpleFacets() throws Exception { .aggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.filter("all", termQuery("multi", "test")))) .aggregation(AggregationBuilders.filter("test1", termQuery("name", "test1"))); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(sourceBuilder)).actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L)); + assertNoFailuresAndResponse(client().search(new SearchRequest("test").source(sourceBuilder)), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(100L)); - Global global = searchResponse.getAggregations().get("global"); - Filter all = global.getAggregations().get("all"); - Filter test1 = searchResponse.getAggregations().get("test1"); - assertThat(test1.getDocCount(), equalTo(1L)); - assertThat(all.getDocCount(), equalTo(100L)); + Global global = response.getAggregations().get("global"); + Filter all = global.getAggregations().get("all"); + Filter test1 = response.getAggregations().get("test1"); + assertThat(test1.getDocCount(), equalTo(1L)); + assertThat(all.getDocCount(), equalTo(100L)); + }); } public void testFailedSearchWithWrongQuery() throws Exception { @@ -352,20 +353,22 @@ public void testFailedMultiSearchWithWrongQuery() throws Exception { logger.info("Start Testing failed multi search with a wrong query"); - MultiSearchResponse response = client().prepareMultiSearch() - .add(prepareSearch("test").setQuery(new MatchQueryBuilder("foo", "biz"))) - 
.add(prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) - .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())) - .get(); - assertThat(response.getResponses().length, equalTo(3)); - assertThat(response.getResponses()[0].getFailureMessage(), notNullValue()); - - assertThat(response.getResponses()[1].getFailureMessage(), nullValue()); - assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + client().prepareMultiSearch() + .add(prepareSearch("test").setQuery(new MatchQueryBuilder("foo", "biz"))) + .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) + .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())), + response -> { + assertThat(response.getResponses().length, equalTo(3)); + assertThat(response.getResponses()[0].getFailureMessage(), notNullValue()); - assertThat(response.getResponses()[2].getFailureMessage(), nullValue()); - assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10)); + assertThat(response.getResponses()[1].getFailureMessage(), nullValue()); + assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1)); + assertThat(response.getResponses()[2].getFailureMessage(), nullValue()); + assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10)); + } + ); logger.info("Done Testing failed search"); } @@ -374,28 +377,30 @@ public void testFailedMultiSearchWithWrongQueryWithFunctionScore() throws Except logger.info("Start Testing failed multi search with a wrong query"); - MultiSearchResponse response = client().prepareMultiSearch() - // Add custom score query with bogus script - .add( - prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery( - QueryBuilders.termQuery("nid", 1), - new ScriptScoreFunctionBuilder(new Script(ScriptType.INLINE, "bar", "foo", Collections.emptyMap())) + assertResponse( + client().prepareMultiSearch() + // Add custom score query with bogus script + .add( + prepareSearch("test").setQuery( + QueryBuilders.functionScoreQuery( + QueryBuilders.termQuery("nid", 1), + new ScriptScoreFunctionBuilder(new Script(ScriptType.INLINE, "bar", "foo", Collections.emptyMap())) + ) ) ) - ) - .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) - .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())) - .get(); - assertThat(response.getResponses().length, equalTo(3)); - assertThat(response.getResponses()[0].getFailureMessage(), notNullValue()); + .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2))) + .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())), + response -> { + assertThat(response.getResponses().length, equalTo(3)); + assertThat(response.getResponses()[0].getFailureMessage(), notNullValue()); - assertThat(response.getResponses()[1].getFailureMessage(), nullValue()); - assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1)); - - assertThat(response.getResponses()[2].getFailureMessage(), nullValue()); - assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10)); + assertThat(response.getResponses()[1].getFailureMessage(), nullValue()); + assertThat(response.getResponses()[1].getResponse().getHits().getHits().length, equalTo(1)); + assertThat(response.getResponses()[2].getFailureMessage(), nullValue()); + assertThat(response.getResponses()[2].getResponse().getHits().getHits().length, equalTo(10)); + } + ); 
logger.info("Done Testing failed search"); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java index 2ddbbec5bc1c8..582df3a5bb396 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java @@ -13,7 +13,6 @@ import org.apache.lucene.index.PointValues; import org.elasticsearch.action.search.CanMatchNodeRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -34,7 +33,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.AbstractMultiClustersTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matchers; @@ -46,6 +44,9 @@ import java.util.List; import java.util.Optional; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.in; @@ -103,7 +104,7 @@ protected Collection> nodePlugins(String clusterAlias) { int createIndexAndIndexDocs(String cluster, String index, int numberOfShards, long timestamp, boolean exposeTimestamp) throws Exception { Client client = client(cluster); - ElasticsearchAssertions.assertAcked( + assertAcked( client.admin() .indices() .prepareCreate(index) @@ -175,11 +176,12 @@ public void testCanMatchOnTimeRange() throws Exception { SearchSourceBuilder source = new SearchSourceBuilder().query(new RangeQueryBuilder("@timestamp").from(timestamp)); SearchRequest request = new SearchRequest("local_*", "*:remote_*"); request.source(source).setCcsMinimizeRoundtrips(minimizeRoundTrips); - SearchResponse searchResp = client().search(request).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResp, localDocs + remoteDocs); - int totalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards; - assertThat(searchResp.getTotalShards(), equalTo(totalShards)); - assertThat(searchResp.getSkippedShards(), equalTo(oldLocalNumShards + oldRemoteNumShards)); + assertResponse(client().search(request), response -> { + assertHitCount(response, localDocs + remoteDocs); + int totalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards; + assertThat(response.getTotalShards(), equalTo(totalShards)); + assertThat(response.getSkippedShards(), equalTo(oldLocalNumShards + oldRemoteNumShards)); + }); } } finally { for (String cluster : List.of(LOCAL_CLUSTER, REMOTE_CLUSTER)) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index 1596a9a7e28a8..cf8d81f406f91 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -13,13 +13,13 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; @@ -67,6 +67,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -130,13 +131,14 @@ public void testRemoteClusterClientRole() throws Exception { .toList() ); - final SearchResponse resp = localCluster.client(nodeWithRemoteClusterClientRole) - .prepareSearch("demo", "cluster_a:prod") - .setQuery(new MatchAllQueryBuilder()) - .setAllowPartialSearchResults(false) - .setSize(1000) - .get(); - assertHitCount(resp, demoDocs + prodDocs); + assertHitCount( + localCluster.client(nodeWithRemoteClusterClientRole) + .prepareSearch("demo", "cluster_a:prod") + .setQuery(new MatchAllQueryBuilder()) + .setAllowPartialSearchResults(false) + .setSize(1000), + demoDocs + prodDocs + ); } public void testProxyConnectionDisconnect() throws Exception { @@ -238,7 +240,7 @@ public void testCancel() throws Exception { final TaskInfo rootTask = client().admin() .cluster() .prepareListTasks() - .setActions(SearchAction.INSTANCE.name()) + .setActions(TransportSearchAction.TYPE.name()) .get() .getTasks() .stream() @@ -272,7 +274,7 @@ public void testCancel() throws Exception { for (TransportService transportService : transportServices) { Collection<CancellableTask> cancellableTasks = transportService.getTaskManager().getCancellableTasks().values(); for (CancellableTask cancellableTask : cancellableTasks) { - if (cancellableTask.getAction().contains(SearchAction.INSTANCE.name())) { + if (cancellableTask.getAction().contains(TransportSearchAction.TYPE.name())) { assertTrue(cancellableTask.getDescription(), cancellableTask.isCancelled()); } } @@ -398,17 +400,21 @@ public void testLookupFields() throws Exception { .fetchField("to"); SearchRequest request = new SearchRequest("cluster_a:remote_calls").source(searchSourceBuilder); request.setCcsMinimizeRoundtrips(randomBoolean()); - SearchResponse searchResponse = client().search(request).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 2); - SearchHit hit0 = searchResponse.getHits().getHits()[0]; - assertThat(hit0.getIndex(), equalTo("remote_calls")); - assertThat(hit0.field("from"), nullValue()); - assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); - - SearchHit hit1 = 
searchResponse.getHits().getHits()[1]; - assertThat(hit1.getIndex(), equalTo("remote_calls")); - assertThat(hit1.field("from").getValues(), contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B")))); - assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + assertResponse(client().search(request), response -> { + ElasticsearchAssertions.assertHitCount(response, 2); + SearchHit hit0 = response.getHits().getHits()[0]; + assertThat(hit0.getIndex(), equalTo("remote_calls")); + assertThat(hit0.field("from"), nullValue()); + assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + + SearchHit hit1 = response.getHits().getHits()[1]; + assertThat(hit1.getIndex(), equalTo("remote_calls")); + assertThat( + hit1.field("from").getValues(), + contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B"))) + ); + assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + }); } // Search on both clusters { @@ -419,22 +425,26 @@ public void testLookupFields() throws Exception { .fetchField("to"); SearchRequest request = new SearchRequest("local_calls", "cluster_a:remote_calls").source(searchSourceBuilder); request.setCcsMinimizeRoundtrips(randomBoolean()); - SearchResponse searchResponse = client().search(request).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 3); - SearchHit hit0 = searchResponse.getHits().getHits()[0]; - assertThat(hit0.getIndex(), equalTo("remote_calls")); - assertThat(hit0.field("from"), nullValue()); - assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); - - SearchHit hit1 = searchResponse.getHits().getHits()[1]; - assertThat(hit1.getIndex(), equalTo("remote_calls")); - assertThat(hit1.field("from").getValues(), contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B")))); - assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); - - SearchHit hit2 = searchResponse.getHits().getHits()[2]; - assertThat(hit2.getIndex(), equalTo("local_calls")); - assertThat(hit2.field("from").getValues(), contains(Map.of("name", List.of("Local A")))); - assertThat(hit2.field("to").getValues(), contains(Map.of("name", List.of("Local B")), Map.of("name", List.of("Local C")))); + assertResponse(client().search(request), response -> { + assertHitCount(response, 3); + SearchHit hit0 = response.getHits().getHits()[0]; + assertThat(hit0.getIndex(), equalTo("remote_calls")); + assertThat(hit0.field("from"), nullValue()); + assertThat(hit0.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + + SearchHit hit1 = response.getHits().getHits()[1]; + assertThat(hit1.getIndex(), equalTo("remote_calls")); + assertThat( + hit1.field("from").getValues(), + contains(Map.of("name", List.of("Remote A")), Map.of("name", List.of("Remote B"))) + ); + assertThat(hit1.field("to").getValues(), contains(Map.of("name", List.of("Remote C")))); + + SearchHit hit2 = response.getHits().getHits()[2]; + assertThat(hit2.getIndex(), equalTo("local_calls")); + assertThat(hit2.field("from").getValues(), contains(Map.of("name", List.of("Local A")))); + assertThat(hit2.field("to").getValues(), contains(Map.of("name", List.of("Local B")), Map.of("name", List.of("Local C")))); + }); } } @@ -518,7 +528,7 @@ public void testSearchShardsWithIndexNameQuery() { { QueryBuilder query = new TermQueryBuilder("_index", "cluster_a:my_index"); SearchShardsRequest request = new 
SearchShardsRequest(indices, indicesOptions, query, null, null, randomBoolean(), "cluster_a"); - SearchShardsResponse resp = remoteClient.execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = remoteClient.execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(numShards)); for (SearchShardsGroup group : resp.getGroups()) { assertFalse(group.skipped()); @@ -535,7 +545,7 @@ public void testSearchShardsWithIndexNameQuery() { randomBoolean(), randomFrom("cluster_b", null) ); - SearchShardsResponse resp = remoteClient.execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = remoteClient.execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(numShards)); for (SearchShardsGroup group : resp.getGroups()) { assertTrue(group.skipped()); @@ -552,7 +562,7 @@ public void testSearchShardsWithIndexNameQuery() { randomBoolean(), randomFrom("cluster_a", "cluster_b", null) ); - SearchShardsResponse resp = remoteClient.execute(SearchShardsAction.INSTANCE, request).actionGet(); + SearchShardsResponse resp = remoteClient.execute(TransportSearchShardsAction.TYPE, request).actionGet(); assertThat(resp.getGroups(), hasSize(numShards)); for (SearchShardsGroup group : resp.getGroups()) { assertTrue(group.skipped()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 0be427a5fd09d..379cdfc990207 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -11,6 +11,8 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchResponse.Cluster; +import org.elasticsearch.action.search.SearchResponse.Clusters; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportSearchAction; @@ -41,6 +43,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -119,39 +122,40 @@ public void testClusterDetailsAfterSuccessfulCCS() throws Exception { } searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); 
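The SearchAction.INSTANCE -> TransportSearchAction.TYPE and SearchShardsAction.INSTANCE -> TransportSearchShardsAction.TYPE substitutions above are pure reference relocations: callers address a transport action through a static ActionType constant, so moving that constant onto the transport-action class changes the call sites but not the action's registered name or wire behaviour. A hedged sketch of the indirection, with illustrative names rather than the real org.elasticsearch.action types:

    final class ActionTypeSketch {

        // Stand-in for ActionType<Response>: an action is identified by its name.
        record ActionType<R>(String name) {}

        // Stand-in for a client that dispatches by the action's registered name.
        interface Client {
            <R> R execute(ActionType<R> type, Object request);
        }

        // Whether this constant lives on a *Action class or a Transport*Action
        // class is invisible to dispatch; only the name matters.
        static final ActionType<String> TYPE = new ActionType<>("indices:example/search_shards");

        public static void main(String[] args) {
            Client client = new Client() {
                @Override
                public <R> R execute(ActionType<R> type, Object request) {
                    System.out.println("dispatched " + type.name());
                    return null;
                }
            };
            client.execute(TYPE, new Object());
        }
    }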
- assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), 
equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + }); } // CCS with a search where the timestamp of the query cannot match so should be SUCCESSFUL with all shards skipped @@ -183,47 +187,49 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except searchRequest.source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - if (dfs) { - // with DFS_QUERY_THEN_FETCH, the local shards are never skipped - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); - } - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + 
assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + if (dfs) { + // with DFS_QUERY_THEN_FETCH, the local shards are never skipped + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + } else { + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + } + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + if (clusters.isCcsMinimizeRoundtrips()) { + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + } else { + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); + } + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + }); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - if (clusters.isCcsMinimizeRoundtrips()) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); - } else { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); - } - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); } public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Exception { @@ -251,24 +257,25 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + 
assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertOneFailedShard(localClusterSearchInfo, localNumShards); + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertOneFailedShard(localClusterSearchInfo, localNumShards); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + }); } // tests bug fix https://github.com/elastic/elasticsearch/issues/100350 @@ -296,39 +303,40 @@ public void testClusterDetailsAfterCCSWhereRemoteClusterHasNoShardsToSearch() th } searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo("no_such_index*")); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(0)); // no shards since index does not exist - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), 
equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertNotNull(remoteClusterSearchInfo.getTook()); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo("no_such_index*")); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(0)); // no shards since index does not exist + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertNotNull(remoteClusterSearchInfo.getTook()); + }); } public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws Exception { @@ -375,59 +383,58 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E Throwable rootCause = ExceptionsHelper.unwrap(ee.getCause(), IllegalStateException.class); assertThat(rootCause.getMessage(), containsString("index corrupted")); } else { - SearchResponse searchResponse = queryFuture.get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - if (dfs == false) { - assertThat(clusters.isCcsMinimizeRoundtrips(), equalTo(minimizeRoundtrips)); - } - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - if (skipUnavailable) { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - } else { - 
assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(1)); - } + assertResponse(queryFuture, response -> { + assertNotNull(response); - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + Clusters clusters = response.getClusters(); + if (dfs == false) { + assertThat(clusters.isCcsMinimizeRoundtrips(), equalTo(minimizeRoundtrips)); + } + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + if (skipUnavailable) { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + } else { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(1)); + } - assertNotNull(remoteClusterSearchInfo); - SearchResponse.Cluster.Status expectedStatus = skipUnavailable - ? 
SearchResponse.Cluster.Status.SKIPPED - : SearchResponse.Cluster.Status.FAILED; - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - if (clusters.isCcsMinimizeRoundtrips()) { - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - } else { - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); - } - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + + assertNotNull(remoteClusterSearchInfo); + Cluster.Status expectedStatus = skipUnavailable ? 
Cluster.Status.SKIPPED : Cluster.Status.FAILED; + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); + if (clusters.isCcsMinimizeRoundtrips()) { + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getSuccessfulShards()); + assertNull(remoteClusterSearchInfo.getSkippedShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + } else { + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); + } + assertNull(remoteClusterSearchInfo.getTook()); + assertFalse(remoteClusterSearchInfo.isTimedOut()); + ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); + assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + }); } } @@ -458,40 +465,41 @@ public void testCCSWithSearchTimeoutOnRemoteCluster() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(slowRunningQueryBuilder).timeout(searchTimeout); searchRequest.source(sourceBuilder); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(2)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - assertTrue(localClusterSearchInfo.isTimedOut()); - assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - assertTrue(remoteClusterSearchInfo.isTimedOut()); - assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - 
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.PARTIAL)); + assertTrue(localClusterSearchInfo.isTimedOut()); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.PARTIAL)); + assertTrue(remoteClusterSearchInfo.isTimedOut()); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + }); } public void testRemoteClusterOnlyCCSSuccessfulResult() throws Exception { @@ -513,29 +521,30 @@ public void testRemoteClusterOnlyCCSSuccessfulResult() throws Exception { } searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); - - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - 
assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + }); } public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exception { @@ -560,22 +569,22 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); - assertNotNull(searchResponse); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + Clusters clusters = response.getClusters(); + 
assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); + }); } public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { @@ -612,44 +621,43 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { Throwable rootCause = ExceptionsHelper.unwrap(ee, IllegalStateException.class); assertThat(rootCause.getMessage(), containsString("index corrupted")); } else { - SearchResponse searchResponse = queryFuture.get(); - assertNotNull(searchResponse); - SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.getTotal(), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); - if (skipUnavailable) { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(1)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); - } else { - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(1)); - } + assertResponse(queryFuture, response -> { + assertNotNull(response); + Clusters clusters = response.getClusters(); + assertThat(clusters.getTotal(), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + if (skipUnavailable) { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(1)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + } else { + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(1)); + } - assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); + assertNull(clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)); - SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); - assertNotNull(remoteClusterSearchInfo); - SearchResponse.Cluster.Status expectedStatus = skipUnavailable - ? 
SearchResponse.Cluster.Status.SKIPPED - : SearchResponse.Cluster.Status.FAILED; - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + Cluster.Status expectedStatus = skipUnavailable ? Cluster.Status.SKIPPED : Cluster.Status.FAILED; + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getSuccessfulShards()); + assertNull(remoteClusterSearchInfo.getSkippedShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + assertNull(remoteClusterSearchInfo.getTook()); + assertFalse(remoteClusterSearchInfo.isTimedOut()); + ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); + assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + }); } } - private static void assertOneFailedShard(SearchResponse.Cluster cluster, int totalShards) { + private static void assertOneFailedShard(Cluster cluster, int totalShards) { assertNotNull(cluster); - assertThat(cluster.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertThat(cluster.getStatus(), equalTo(Cluster.Status.PARTIAL)); assertThat(cluster.getTotalShards(), equalTo(totalShards)); assertThat(cluster.getSuccessfulShards(), equalTo(totalShards - 1)); assertThat(cluster.getSkippedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java index fa84353b7c9cb..8b6f4112cfc17 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java @@ -31,6 +31,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; public class CrossClusterSearchLeakIT extends AbstractMultiClustersTestCase { @@ -136,18 +137,23 @@ public void testSearch() throws Exception { } for (ActionFuture future : futures) { - SearchResponse searchResponse = future.get(); - if (searchResponse.getScrollId() != null) { - ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); - clearScrollRequest.scrollIds(List.of(searchResponse.getScrollId())); - client(LOCAL_CLUSTER).clearScroll(clearScrollRequest).get(); - } + assertResponse(future, response -> { + if (response.getScrollId() != null) { + 
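// [Editor's note, not part of the original patch] assertResponse, statically
+ // imported from ElasticsearchAssertions above, appears to run the consumer and
+ // then release the ref-counted SearchResponse. Because the consumer is a plain
+ // java.util.function.Consumer and cannot throw checked exceptions, the
+ // clearScroll(...).get() call below is wrapped in a try/catch that rethrows as
+ // RuntimeException.
+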
ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.scrollIds(List.of(response.getScrollId())); + try { + client(LOCAL_CLUSTER).clearScroll(clearScrollRequest).get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } - Terms terms = searchResponse.getAggregations().get("f"); - assertThat(terms.getBuckets().size(), equalTo(docs)); - for (Terms.Bucket bucket : terms.getBuckets()) { - assertThat(bucket.getDocCount(), equalTo(1L)); - } + Terms terms = response.getAggregations().get("f"); + assertThat(terms.getBuckets().size(), equalTo(docs)); + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket.getDocCount(), equalTo(1L)); + } + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index b600098d82b33..15afd6897a40e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -14,7 +14,6 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; @@ -39,7 +38,7 @@ import java.util.Objects; import static java.util.Collections.singletonList; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.equalTo; @@ -72,21 +71,22 @@ public void testPlugin() throws Exception { indicesAdmin().prepareRefresh().get(); - SearchResponse response = prepareSearch().setSource( - new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test"))) - ).get(); - assertNoFailures(response); - assertThat( - ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("i"), - equalTo(2) - ); - assertThat( - ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("am"), - equalTo(2) - ); - assertThat( - ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("sam"), - equalTo(1) + assertNoFailuresAndResponse( + prepareSearch().setSource(new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test")))), + response -> { + assertThat( + ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("i"), + equalTo(2) + ); + assertThat( + ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("am"), + equalTo(2) + ); + assertThat( + ((Map) response.getHits().getAt(0).field("term_vectors_fetch").getValues().get(0)).get("sam"), + equalTo(1) + ); + } ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 00c5342577231..607c6596d15c9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; @@ -27,6 +26,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -52,6 +52,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -101,8 +102,7 @@ public void testSimpleNested() throws Exception { List requests = new ArrayList<>(); requests.add( - client().prepareIndex("articles") - .setId("1") + prepareIndex("articles").setId("1") .setSource( jsonBuilder().startObject() .field("title", "quick brown fox") @@ -121,8 +121,7 @@ public void testSimpleNested() throws Exception { ) ); requests.add( - client().prepareIndex("articles") - .setId("2") + prepareIndex("articles").setId("2") .setSource( jsonBuilder().startObject() .field("title", "big gray elephant") @@ -142,77 +141,84 @@ public void testSimpleNested() throws Exception { ); indexRandom(true, requests); - SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("1")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); - assertThat(innerHits.getTotalHits().value, equalTo(2L)); - assertThat(innerHits.getHits().length, equalTo(2)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(1).getId(), equalTo("1")); - assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("2")); - 
assertThat(response.getHits().getAt(0).getShard(), notNullValue()); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); - assertThat(innerHits.getTotalHits().value, equalTo(3L)); - assertThat(innerHits.getHits().length, equalTo(3)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(1).getId(), equalTo("2")); - assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(innerHits.getAt(2).getId(), equalTo("2")); - assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2)); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message")) - .setExplain(true) - .addFetchField("comments.mes*") - .addScriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .setSize(1) - ) - ).get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(2L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat( - innerHits.getAt(0).getHighlightFields().get("comments.message").getFragments()[0].string(), - equalTo("fox eat quick") + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); + assertThat(innerHits.getTotalHits().value, equalTo(2L)); + assertThat(innerHits.getHits().length, equalTo(2)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(1).getId(), equalTo("1")); + assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); + } ); - assertThat(innerHits.getAt(0).getExplanation().toString(), containsString("weight(comments.message:fox in")); - assertThat( - innerHits.getAt(0).getFields().get("comments").getValue(), - equalTo(Collections.singletonMap("message", Collections.singletonList("fox eat quick"))) + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg).innerHit(new InnerHitBuilder("comment")) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getShard(), notNullValue()); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits 
innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); + assertThat(innerHits.getTotalHits().value, equalTo(3L)); + assertThat(innerHits.getHits().length, equalTo(3)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(1).getId(), equalTo("2")); + assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(innerHits.getAt(2).getId(), equalTo("2")); + assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message")) + .setExplain(true) + .addFetchField("comments.mes*") + .addScriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .setSize(1) + ) + ), + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(2L)); + assertThat(innerHits.getHits().length, equalTo(1)); + HighlightField highlightField = innerHits.getAt(0).getHighlightFields().get("comments.message"); + assertThat(highlightField.fragments()[0].string(), equalTo("fox eat quick")); + assertThat(innerHits.getAt(0).getExplanation().toString(), containsString("weight(comments.message:fox in")); + assertThat( + innerHits.getAt(0).getFields().get("comments").getValue(), + equalTo(Collections.singletonMap("message", Collections.singletonList("fox eat quick"))) + ); + assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5")); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().addDocValueField("comments.mes*").setSize(1) + ) + ), + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat")); + } ); - assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5")); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().addDocValueField("comments.mes*").setSize(1) - ) - ).get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat")); } public void testRandomNested() throws Exception { @@ -234,7 +240,7 @@ public void testRandomNested() throws Exception { source.startObject().field("x", "y").endObject(); } source.endArray().endObject(); - requestBuilders.add(client().prepareIndex("idx").setId(Integer.toString(i)).setSource(source)); + 
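// [Editor's note, not part of the original patch] Throughout this patch,
+ // client().prepareIndex("...") is replaced by the prepareIndex("...") helper,
+ // which appears to be a test-base-class shorthand for the same call on the
+ // default test client.
+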
requestBuilders.add(prepareIndex("idx").setId(Integer.toString(i)).setSource(source)); } indexRandom(true, requestBuilders); @@ -250,32 +256,31 @@ public void testRandomNested() throws Exception { new InnerHitBuilder("b").addSort(new FieldSortBuilder("_doc").order(SortOrder.ASC)).setSize(size) ) ); - SearchResponse searchResponse = prepareSearch("idx").setQuery(boolQuery).setSize(numDocs).addSort("foo", SortOrder.ASC).get(); - - assertNoFailures(searchResponse); - assertHitCount(searchResponse, numDocs); - assertThat(searchResponse.getHits().getHits().length, equalTo(numDocs)); - for (int i = 0; i < numDocs; i++) { - SearchHit searchHit = searchResponse.getHits().getAt(i); - assertThat(searchHit.getShard(), notNullValue()); - SearchHits inner = searchHit.getInnerHits().get("a"); - assertThat(inner.getTotalHits().value, equalTo((long) field1InnerObjects[i])); - for (int j = 0; j < field1InnerObjects[i] && j < size; j++) { - SearchHit innerHit = inner.getAt(j); - assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field1")); - assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j)); - assertThat(innerHit.getNestedIdentity().getChild(), nullValue()); - } - - inner = searchHit.getInnerHits().get("b"); - assertThat(inner.getTotalHits().value, equalTo((long) field2InnerObjects[i])); - for (int j = 0; j < field2InnerObjects[i] && j < size; j++) { - SearchHit innerHit = inner.getAt(j); - assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field2")); - assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j)); - assertThat(innerHit.getNestedIdentity().getChild(), nullValue()); + assertNoFailuresAndResponse(prepareSearch("idx").setQuery(boolQuery).setSize(numDocs).addSort("foo", SortOrder.ASC), response -> { + assertHitCount(response, numDocs); + assertThat(response.getHits().getHits().length, equalTo(numDocs)); + for (int i = 0; i < numDocs; i++) { + SearchHit searchHit = response.getHits().getAt(i); + assertThat(searchHit.getShard(), notNullValue()); + SearchHits inner = searchHit.getInnerHits().get("a"); + assertThat(inner.getTotalHits().value, equalTo((long) field1InnerObjects[i])); + for (int j = 0; j < field1InnerObjects[i] && j < size; j++) { + SearchHit innerHit = inner.getAt(j); + assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field1")); + assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j)); + assertThat(innerHit.getNestedIdentity().getChild(), nullValue()); + } + + inner = searchHit.getInnerHits().get("b"); + assertThat(inner.getTotalHits().value, equalTo((long) field2InnerObjects[i])); + for (int j = 0; j < field2InnerObjects[i] && j < size; j++) { + SearchHit innerHit = inner.getAt(j); + assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field2")); + assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j)); + assertThat(innerHit.getNestedIdentity().getChild(), nullValue()); + } } - } + }); } public void testNestedMultipleLayers() throws Exception { @@ -311,8 +316,7 @@ public void testNestedMultipleLayers() throws Exception { List requests = new ArrayList<>(); requests.add( - client().prepareIndex("articles") - .setId("1") + prepareIndex("articles").setId("1") .setSource( jsonBuilder().startObject() .field("title", "quick brown fox") @@ -338,8 +342,7 @@ public void testNestedMultipleLayers() throws Exception { ) ); requests.add( - client().prepareIndex("articles") - .setId("2") + prepareIndex("articles").setId("2") .setSource( jsonBuilder().startObject() .field("title", "big gray elephant")
@@ -359,140 +362,154 @@ public void testNestedMultipleLayers() throws Exception { indexRandom(true, requests); // Check we can load the first doubly-nested document. - SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("1")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); // Check we can load the second doubly-nested document.
- response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "neutral"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("1")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("1")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "neutral"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("1")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("1")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); // Directly refer to the second level: - response = prepareSearch("articles").setQuery( - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("2")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments.remarks"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - 
assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") - ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertSearchHit(response, 1, hasId("2")); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertThat(innerHits.getTotalHits().value, equalTo(1L)); - assertThat(innerHits.getHits().length, equalTo(1)); - assertThat(innerHits.getAt(0).getId(), equalTo("2")); - assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); - assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - - // Check that inner hits contain _source even when it's disabled on the parent request. 
- response = prepareSearch("articles").setFetchSource(false) - .setQuery( + new InnerHitBuilder() + ) + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments.remarks"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( nestedQuery( "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg).innerHit( new InnerHitBuilder("remark") ), ScoreMode.Avg ).innerHit(new InnerHitBuilder()) - ) - .get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertNotNull(innerHits.getAt(0).getSourceAsMap()); - assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); - - response = prepareSearch("articles").setQuery( - nestedQuery( - "comments", - nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( - new InnerHitBuilder("remark") + ), + response -> { + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertThat(innerHits.getTotalHits().value, equalTo(1L)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + } + ); + // Check that inner hits contain _source even when it's disabled on the parent request. 
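+ // [Editor's note, not part of the original patch] A minimal sketch of the helper
+ // pattern used below, assuming the static import of assertNoFailuresAndResponse
+ // from ElasticsearchAssertions added in this file's import hunk:
+ //
+ //     assertNoFailuresAndResponse(prepareSearch("articles"), response -> {
+ //         assertHitCount(response, 1);
+ //     });
+ //
+ // i.e. it appears to assert that no shard failed, hand the response to the
+ // consumer, and release the ref-counted response once the consumer returns.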
+ assertNoFailuresAndResponse( + prepareSearch("articles").setFetchSource(false) + .setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder()) ), - ScoreMode.Avg - ).innerHit(new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)) - ).get(); - assertNoFailures(response); - innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); - innerHits = innerHits.getAt(0).getInnerHits().get("remark"); - assertNotNull(innerHits.getAt(0).getSourceAsMap()); - assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertNotNull(innerHits.getAt(0).getSourceAsMap()); + assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery( + "comments", + nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg).innerHit( + new InnerHitBuilder("remark") + ), + ScoreMode.Avg + ).innerHit(new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)) + ), + response -> { + SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments"); + innerHits = innerHits.getAt(0).getInnerHits().get("remark"); + assertNotNull(innerHits.getAt(0).getSourceAsMap()); + assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty()); + } + ); } // Issue #9723 @@ -501,8 +518,7 @@ public void testNestedDefinedAsObject() throws Exception { List requests = new ArrayList<>(); requests.add( - client().prepareIndex("articles") - .setId("1") + prepareIndex("articles").setId("1") .setSource( jsonBuilder().startObject() .field("title", "quick brown fox") @@ -514,20 +530,23 @@ public void testNestedDefinedAsObject() throws Exception { ); indexRandom(true, requests); - SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getId(), equalTo("1")); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), - equalTo("comments") + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getId(), equalTo("1")); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), + equalTo("comments") + ); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + 
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); + } ); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); } public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { @@ -554,8 +573,7 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { List requests = new ArrayList<>(); requests.add( - client().prepareIndex("articles") - .setId("1") + prepareIndex("articles").setId("1") .setSource( jsonBuilder().startObject() .field("title", "quick brown fox") @@ -583,61 +601,66 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { ); indexRandom(true, requests); - SearchResponse resp1 = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.FETCH_SOURCE) - ) - ).get(); - assertNoFailures(resp1); - assertHitCount(resp1, 1); - SearchHit parent = resp1.getHits().getAt(0); - assertThat(parent.getId(), equalTo("1")); - SearchHits inner = parent.getInnerHits().get("comments.messages"); - assertThat(inner.getTotalHits().value, equalTo(2L)); - assertThat(inner.getAt(0).getSourceAsString(), equalTo("{\"message\":\"no fox\"}")); - assertThat(inner.getAt(1).getSourceAsString(), equalTo("{\"message\":\"fox eat quick\"}")); - - SearchResponse response = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - SearchHit hit = response.getHits().getAt(0); - assertThat(hit.getId(), equalTo("1")); - SearchHits messages = hit.getInnerHits().get("comments.messages"); - assertThat(messages.getTotalHits().value, equalTo(2L)); - assertThat(messages.getAt(0).getId(), equalTo("1")); - assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(2)); - assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); - assertThat(messages.getAt(1).getId(), equalTo("1")); - assertThat(messages.getAt(1).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(messages.getAt(1).getNestedIdentity().getChild(), nullValue()); - - response = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - hit = response.getHits().getAt(0); - assertThat(hit.getId(), equalTo("1")); - messages = hit.getInnerHits().get("comments.messages"); - assertThat(messages.getTotalHits().value, equalTo(1L)); - assertThat(messages.getAt(0).getId(), equalTo("1")); - assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(0).getNestedIdentity().getOffset(), 
equalTo(1)); - assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); - + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit parent = response.getHits().getAt(0); + assertThat(parent.getId(), equalTo("1")); + SearchHits inner = parent.getInnerHits().get("comments.messages"); + assertThat(inner.getTotalHits().value, equalTo(2L)); + assertThat(inner.getAt(0).getSourceAsString(), equalTo("{\"message\":\"no fox\"}")); + assertThat(inner.getAt(1).getSourceAsString(), equalTo("{\"message\":\"fox eat quick\"}")); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getId(), equalTo("1")); + SearchHits messages = hit.getInnerHits().get("comments.messages"); + assertThat(messages.getTotalHits().value, equalTo(2L)); + assertThat(messages.getAt(0).getId(), equalTo("1")); + assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(2)); + assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + assertThat(messages.getAt(1).getId(), equalTo("1")); + assertThat(messages.getAt(1).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(1).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(messages.getAt(1).getNestedIdentity().getChild(), nullValue()); + } + ); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getId(), equalTo("1")); + SearchHits messages = hit.getInnerHits().get("comments.messages"); + assertThat(messages.getTotalHits().value, equalTo(1L)); + assertThat(messages.getAt(0).getId(), equalTo("1")); + assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + } + ); // index the message in an object form instead of an array requests = new ArrayList<>(); requests.add( - client().prepareIndex("articles") - .setId("1") + prepareIndex("articles").setId("1") .setSource( jsonBuilder().startObject() .field("title", "quick brown fox") @@ -650,21 +673,24 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { ) ); indexRandom(true, requests); - response = prepareSearch("articles").setQuery( - nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 
1); - hit = response.getHits().getAt(0); - assertThat(hit.getId(), equalTo("1")); - messages = hit.getInnerHits().get("comments.messages"); - assertThat(messages.getTotalHits().value, equalTo(1L)); - assertThat(messages.getAt(0).getId(), equalTo("1")); - assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); - assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + assertNoFailuresAndResponse( + prepareSearch("articles").setQuery( + nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox"), ScoreMode.Avg).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + assertThat(hit.getId(), equalTo("1")); + SearchHits messages = hit.getInnerHits().get("comments.messages"); + assertThat(messages.getTotalHits().value, equalTo(1L)); + assertThat(messages.getAt(0).getId(), equalTo("1")); + assertThat(messages.getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(messages.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(messages.getAt(0).getNestedIdentity().getChild(), nullValue()); + } + ); } public void testMatchesQueriesNestedInnerHits() throws Exception { @@ -691,8 +717,7 @@ public void testMatchesQueriesNestedInnerHits() throws Exception { List requests = new ArrayList<>(); int numDocs = randomIntBetween(2, 35); requests.add( - client().prepareIndex("test") - .setId("0") + prepareIndex("test").setId("0") .setSource( jsonBuilder().startObject() .field("field1", 0) @@ -710,8 +735,7 @@ public void testMatchesQueriesNestedInnerHits() throws Exception { ) ); requests.add( - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("field1", 1) @@ -731,8 +755,7 @@ public void testMatchesQueriesNestedInnerHits() throws Exception { for (int i = 2; i < numDocs; i++) { requests.add( - client().prepareIndex("test") - .setId(String.valueOf(i)) + prepareIndex("test").setId(String.valueOf(i)) .setSource( jsonBuilder().startObject() .field("field1", i) @@ -760,34 +783,33 @@ public void testMatchesQueriesNestedInnerHits() throws Exception { query = nestedQuery("nested1", query, ScoreMode.Avg).innerHit( new InnerHitBuilder().addSort(new FieldSortBuilder("nested1.n_field1").order(SortOrder.ASC)) ); - SearchResponse searchResponse = prepareSearch("test").setQuery(query).setSize(numDocs).addSort("field1", SortOrder.ASC).get(); - assertNoFailures(searchResponse); - assertAllSuccessful(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("0")); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getTotalHits().value, equalTo(2L)); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test1")); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries()[0], equalTo("test3")); - - 
assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test2")); - - for (int i = 2; i < numDocs; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(String.valueOf(i))); - assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test3")); - } + assertNoFailuresAndResponse(prepareSearch("test").setQuery(query).setSize(numDocs).addSort("field1", SortOrder.ASC), response -> { + assertAllSuccessful(response); + assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); + assertThat(response.getHits().getAt(0).getId(), equalTo("0")); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested1").getAt(1).getMatchedQueries()[0], equalTo("test3")); + + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); + assertThat(response.getHits().getAt(1).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test2")); + + for (int i = 2; i < numDocs; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(String.valueOf(i))); + assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries().length, equalTo(1)); + assertThat(response.getHits().getAt(i).getInnerHits().get("nested1").getAt(0).getMatchedQueries()[0], equalTo("test3")); + } + }); } public void testNestedSource() throws Exception { assertAcked(prepareCreate("index1").setMapping("comments", "type=nested")); - client().prepareIndex("index1") - .setId("1") + prepareIndex("index1").setId("1") .setSource( jsonBuilder().startObject() .field("message", "quick brown fox") @@ -812,71 +834,83 @@ public void testNestedSource() throws Exception { // the field name (comments.message) used for source filtering should be the same as when using that field for // other features (like in the query dsl or aggs) in order for consistency: - SearchResponse response = prepareSearch().setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.message" }, null)) - ) 
- ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"), - equalTo("fox eat quick") - ); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().size(), equalTo(1)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"), - equalTo("fox ate rabbit x y z") + assertNoFailuresAndResponse( + prepareSearch().setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit( + new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.message" }, null)) + ) + ), + response -> { + assertHitCount(response, 1); + + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"), + equalTo("fox eat quick") + ); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().size(), equalTo(1)); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"), + equalTo("fox ate rabbit x y z") + ); + } ); - response = prepareSearch().setQuery( - nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder()) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"), - equalTo("fox eat quick") - ); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"), - equalTo("fox ate rabbit x y z") + assertNoFailuresAndResponse( + prepareSearch().setQuery( + nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"), + equalTo("fox eat quick") + ); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat( + response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"), + equalTo("fox ate rabbit x y z") + ); + } ); // Source filter on a field that does not exist inside the nested document and just check that we do not fail and
// return an empty _source: - response = prepareSearch().setQuery( - nestedQuery("comments", matchQuery("comments.message", "away"), ScoreMode.None).innerHit( - new InnerHitBuilder().setFetchSourceContext(FetchSourceContext.of(true, new String[] { "comments.missing_field" }, null)) - ) - ).get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(0)); - + assertNoFailuresAndResponse( + prepareSearch().setQuery( + nestedQuery("comments", matchQuery("comments.message", "away"), ScoreMode.None).innerHit( + new InnerHitBuilder().setFetchSourceContext( + FetchSourceContext.of(true, new String[] { "comments.missing_field" }, null) + ) + ) + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(0)); + } + ); // Check that inner hits contain _source even when it's disabled on the root request. - response = prepareSearch().setFetchSource(false) - .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder())) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); - assertFalse(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().isEmpty()); + assertNoFailuresAndResponse( + prepareSearch().setFetchSource(false) + .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder())), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value, equalTo(2L)); + assertFalse(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().isEmpty()); + } + ); } public void testInnerHitsWithIgnoreUnmapped() throws Exception { assertAcked(prepareCreate("index1").setMapping("nested_type", "type=nested")); createIndex("index2"); - client().prepareIndex("index1").setId("1").setSource("nested_type", Collections.singletonMap("key", "value")).get(); - client().prepareIndex("index2").setId("3").setSource("key", "value").get(); + prepareIndex("index1").setId("1").setSource("nested_type", Collections.singletonMap("key", "value")).get(); + prepareIndex("index2").setId("3").setSource("key", "value").get(); refresh(); assertSearchHitsWithoutFailures( @@ -897,8 +931,7 @@ public void testUseMaxDocInsteadOfSize() throws Exception { Settings.builder().put(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), ArrayUtil.MAX_ARRAY_LENGTH), "index2" ); - client().prepareIndex("index2") - .setId("1") + prepareIndex("index2").setId("1") .setSource( jsonBuilder().startObject().startArray("nested").startObject().field("field", "value1").endObject().endArray().endObject() ) @@ -913,8 +946,7 @@ public void testTooHighResultWindow() throws Exception { assertAcked(prepareCreate("index2").setMapping("nested", "type=nested")); - client().prepareIndex("index2") - .setId("1") + prepareIndex("index2").setId("1") .setSource(
jsonBuilder().startObject().startArray("nested").startObject().field("field", "value1").endObject().endArray().endObject() ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java index d7347ef21328f..c996725e6285e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.fetch.subphase; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -32,6 +31,7 @@ import static org.elasticsearch.index.query.QueryBuilders.termsQuery; import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasKey; @@ -41,253 +41,280 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("name", "test1", "number", 1).get(); - client().prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get(); - client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); + prepareIndex("test").setId("1").setSource("name", "test1", "number", 1).get(); + prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get(); + prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery( - boolQuery().must(matchAllQuery()) - .filter( - boolQuery().should(rangeQuery("number").lt(2).queryName("test1")).should(rangeQuery("number").gte(2).queryName("test2")) - ) - ).get(); - assertHitCount(searchResponse, 3L); - for (SearchHit hit : searchResponse.getHits()) { - if (hit.getId().equals("3") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); - assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); - } else if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); - assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); + assertResponse( + prepareSearch().setQuery( + boolQuery().must(matchAllQuery()) + .filter( + boolQuery().should(rangeQuery("number").lt(2).queryName("test1")) + .should(rangeQuery("number").gte(2).queryName("test2")) + ) + ), + response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("3") || hit.getId().equals("2")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); + } else if (hit.getId().equals("1")) { + 
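The InnerHitsIT hunks above are the template for every file in this diff: a `SearchResponse` captured in a local variable and checked with free-standing assertions becomes a lambda handed to `assertNoFailuresAndResponse` (or plain `assertResponse`), so the test framework rather than the test body owns the response lifecycle. A minimal sketch of what such a helper has to do follows; it assumes `SearchResponse` is ref-counted, as in recent Elasticsearch, and the simplified signature is an illustration, not the real `ElasticsearchAssertions` code:

```java
import java.util.function.Consumer;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;

// Minimal sketch, not the actual ElasticsearchAssertions implementation.
static void assertNoFailuresAndResponse(SearchRequestBuilder request, Consumer<SearchResponse> consumer) {
    SearchResponse response = request.get();   // execute synchronously, as the old call sites did
    try {
        assertNoFailures(response);            // the check the old code invoked explicitly
        consumer.accept(response);             // the caller's assertions run inside the guarded scope
    } finally {
        response.decRef();                     // assumed release step: free the response even when an assertion throws
    }
}
```

The try/finally is the point of the migration: with `.get()` followed by local assertions, a failing assertion leaked the response; with the callback shape, the release is unconditional.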
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java
index d7347ef21328f..c996725e6285e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.fetch.subphase;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.index.query.MatchAllQueryBuilder;
@@ -32,6 +31,7 @@
 import static org.elasticsearch.index.query.QueryBuilders.termsQuery;
 import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.hasKey;
@@ -41,253 +41,280 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("name", "test1", "number", 1).get();
-        client().prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get();
-        client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get();
+        prepareIndex("test").setId("1").setSource("name", "test1", "number", 1).get();
+        prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get();
+        prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(
-            boolQuery().must(matchAllQuery())
-                .filter(
-                    boolQuery().should(rangeQuery("number").lt(2).queryName("test1")).should(rangeQuery("number").gte(2).queryName("test2"))
-                )
-        ).get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("3") || hit.getId().equals("2")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
-                assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
-            } else if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1"));
-                assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        assertResponse(
+            prepareSearch().setQuery(
+                boolQuery().must(matchAllQuery())
+                    .filter(
+                        boolQuery().should(rangeQuery("number").lt(2).queryName("test1"))
+                            .should(rangeQuery("number").gte(2).queryName("test2"))
+                    )
+            ),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("3") || hit.getId().equals("2")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
+                        assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
+                    } else if (hit.getId().equals("1")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1"));
+                        assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
-
-        searchResponse = prepareSearch().setQuery(
-            boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2"))
-        ).get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1") || hit.getId().equals("2")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1"));
-                assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f));
-            } else if (hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
-                assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        );
+
+        assertResponse(
+            prepareSearch().setQuery(
+                boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2"))
+            ),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1") || hit.getId().equals("2")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1"));
+                        assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f));
+                    } else if (hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
+                        assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("name", "test", "title", "title1").get();
-        client().prepareIndex("test").setId("2").setSource("name", "test").get();
-        client().prepareIndex("test").setId("3").setSource("name", "test").get();
+        prepareIndex("test").setId("1").setSource("name", "test", "title", "title1").get();
+        prepareIndex("test").setId("2").setSource("name", "test").get();
+        prepareIndex("test").setId("3").setSource("name", "test").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .setPostFilter(
-                boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title"))
-            )
-            .get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
-                assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
-            } else if (hit.getId().equals("2") || hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setPostFilter(
+                    boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title"))
+                ),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
+                        assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
+                    } else if (hit.getId().equals("2") || hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .setPostFilter(
-                boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title"))
-            )
-            .get();
-
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
-                assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
-            } else if (hit.getId().equals("2") || hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        );
+
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setPostFilter(
+                    boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title"))
+                ),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
+                        assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
+                    } else if (hit.getId().equals("2") || hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("name", "test", "title", "title1").get();
-        client().prepareIndex("test").setId("2").setSource("name", "test", "title", "title2").get();
-        client().prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get();
+        prepareIndex("test").setId("1").setSource("name", "test", "title", "title1").get();
+        prepareIndex("test").setId("2").setSource("name", "test", "title", "title2").get();
+        prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(
-            boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title"))
-        ).setPostFilter(termQuery("name", "test").queryName("name")).get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
-                assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        assertResponse(
+            prepareSearch().setQuery(
+                boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title"))
+            ).setPostFilter(termQuery("name", "test").queryName("name")),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
+                        assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
-
-        searchResponse = prepareSearch().setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title"))
-            .setPostFilter(matchQuery("name", "test").queryName("name"))
-            .get();
-        assertHitCount(searchResponse, 3L);
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
-                assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
-                assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        );
+
+        assertResponse(
+            prepareSearch().setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title"))
+                .setPostFilter(matchQuery("name", "test").queryName("name")),
+            response -> {
+                assertHitCount(response, 3L);
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("name"));
+                        assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("title"));
+                        assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
+        );
     }
 
     public void testRegExpQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")).get();
-        assertHitCount(searchResponse, 1L);
+        assertResponse(prepareSearch().setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")), response -> {
+            assertHitCount(response, 1L);
 
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex"));
-                assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+            for (SearchHit hit : response.getHits()) {
+                if (hit.getId().equals("1")) {
+                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex"));
+                    assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f));
+                } else {
+                    fail("Unexpected document returned with id " + hit.getId());
+                }
             }
-        }
+        });
     }
 
     public void testPrefixQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")).get();
-        assertHitCount(searchResponse, 1L);
+        assertResponse(prepareSearch().setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")), response -> {
+            assertHitCount(response, 1L);
 
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix"));
-                assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+            for (SearchHit hit : response.getHits()) {
+                if (hit.getId().equals("1")) {
+                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix"));
+                    assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f));
+                } else {
+                    fail("Unexpected document returned with id " + hit.getId());
+                }
             }
-        }
+        });
     }
 
     public void testFuzzyQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")).get();
-        assertHitCount(searchResponse, 1L);
+        assertResponse(prepareSearch().setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")), response -> {
+            assertHitCount(response, 1L);
 
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy"));
-                assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+            for (SearchHit hit : response.getHits()) {
+                if (hit.getId().equals("1")) {
+                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy"));
+                    assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f));
+                } else {
+                    fail("Unexpected document returned with id " + hit.getId());
+                }
             }
-        }
+        });
     }
 
     public void testWildcardQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")).get();
-        assertHitCount(searchResponse, 1L);
+        assertResponse(prepareSearch().setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")), response -> {
+            assertHitCount(response, 1L);
 
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard"));
-                assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+            for (SearchHit hit : response.getHits()) {
+                if (hit.getId().equals("1")) {
+                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard"));
+                    assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f));
+                } else {
+                    fail("Unexpected document returned with id " + hit.getId());
+                }
             }
-        }
+        });
     }
 
     public void testSpanFirstQuerySupportsName() {
         createIndex("test1");
         ensureGreen();
 
-        client().prepareIndex("test1").setId("1").setSource("title", "title1 title2").get();
+        prepareIndex("test1").setId("1").setSource("title", "title1 title2").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(
-            QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")
-        ).get();
-        assertHitCount(searchResponse, 1L);
-
-        for (SearchHit hit : searchResponse.getHits()) {
-            if (hit.getId().equals("1")) {
-                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                assertThat(hit.getMatchedQueriesAndScores(), hasKey("span"));
-                assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f));
-            } else {
-                fail("Unexpected document returned with id " + hit.getId());
+        assertResponse(
+            prepareSearch().setQuery(QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")),
+            response -> {
+                assertHitCount(response, 1L);
+
+                for (SearchHit hit : response.getHits()) {
+                    if (hit.getId().equals("1")) {
+                        assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                        assertThat(hit.getMatchedQueriesAndScores(), hasKey("span"));
+                        assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f));
+                    } else {
+                        fail("Unexpected document returned with id " + hit.getId());
+                    }
+                }
             }
-        }
+        );
     }
 
     /**
@@ -297,33 +324,36 @@ public void testMatchedWithShould() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get();
-        client().prepareIndex("test").setId("2").setSource("content", "consectetur adipisicing elit").get();
+        prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get();
+        prepareIndex("test").setId("2").setSource("content", "consectetur adipisicing elit").get();
         refresh();
 
         // Execute search at least two times to load it in cache
         int iter = scaledRandomIntBetween(2, 10);
         for (int i = 0; i < iter; i++) {
-            SearchResponse searchResponse = prepareSearch().setQuery(
-                boolQuery().minimumShouldMatch(1)
-                    .should(queryStringQuery("dolor").queryName("dolor"))
-                    .should(queryStringQuery("elit").queryName("elit"))
-            ).get();
-
-            assertHitCount(searchResponse, 2L);
-            for (SearchHit hit : searchResponse.getHits()) {
-                if (hit.getId().equals("1")) {
-                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor"));
-                    assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f));
-                } else if (hit.getId().equals("2")) {
-                    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-                    assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit"));
-                    assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f));
-                } else {
-                    fail("Unexpected document returned with id " + hit.getId());
+            assertResponse(
+                prepareSearch().setQuery(
+                    boolQuery().minimumShouldMatch(1)
+                        .should(queryStringQuery("dolor").queryName("dolor"))
+                        .should(queryStringQuery("elit").queryName("elit"))
+                ),
+                response -> {
+                    assertHitCount(response, 2L);
+                    for (SearchHit hit : response.getHits()) {
+                        if (hit.getId().equals("1")) {
+                            assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                            assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor"));
+                            assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f));
+                        } else if (hit.getId().equals("2")) {
+                            assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                            assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit"));
+                            assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f));
+                        } else {
+                            fail("Unexpected document returned with id " + hit.getId());
+                        }
+                    }
                 }
-            }
+            );
         }
     }
 
@@ -331,7 +361,7 @@ public void testMatchedWithWrapperQuery() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get();
+        prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get();
         refresh();
 
         MatchQueryBuilder matchQueryBuilder = matchQuery("content", "amet").queryName("abc");
@@ -340,12 +370,13 @@ public void testMatchedWithWrapperQuery() throws Exception {
         BytesReference termBytes = XContentHelper.toXContent(termQueryBuilder, XContentType.JSON, false);
         QueryBuilder[] queries = new QueryBuilder[] { wrapperQuery(matchBytes), constantScoreQuery(wrapperQuery(termBytes)) };
         for (QueryBuilder query : queries) {
-            SearchResponse searchResponse = prepareSearch().setQuery(query).get();
-            assertHitCount(searchResponse, 1L);
-            SearchHit hit = searchResponse.getHits().getAt(0);
-            assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
-            assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc"));
-            assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f));
+            assertResponse(prepareSearch().setQuery(query), response -> {
+                assertHitCount(response, 1L);
+                SearchHit hit = response.getHits().getAt(0);
+                assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
+                assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc"));
+                assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f));
+            });
         }
     }
 
@@ -353,20 +384,23 @@ public void testMatchedWithRescoreQuery() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test").setId("1").setSource("content", "hello world").get();
-        client().prepareIndex("test").setId("2").setSource("content", "hello you").get();
+        prepareIndex("test").setId("1").setSource("content", "hello world").get();
+        prepareIndex("test").setId("2").setSource("content", "hello you").get();
         refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(new MatchAllQueryBuilder().queryName("all"))
-            .setRescorer(
-                new QueryRescorerBuilder(new MatchPhraseQueryBuilder("content", "hello you").boost(10).queryName("rescore_phrase"))
-            )
-            .get();
-        assertHitCount(searchResponse, 2L);
-        assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(2));
-        assertThat(searchResponse.getHits().getAt(0).getMatchedQueries(), equalTo(new String[] { "all", "rescore_phrase" }));
-
-        assertThat(searchResponse.getHits().getAt(1).getMatchedQueries().length, equalTo(1));
-        assertThat(searchResponse.getHits().getAt(1).getMatchedQueries(), equalTo(new String[] { "all" }));
+        assertResponse(
+            prepareSearch().setQuery(new MatchAllQueryBuilder().queryName("all"))
+                .setRescorer(
+                    new QueryRescorerBuilder(new MatchPhraseQueryBuilder("content", "hello you").boost(10).queryName("rescore_phrase"))
+                ),
+            response -> {
+                assertHitCount(response, 2L);
+                assertThat(response.getHits().getAt(0).getMatchedQueries().length, equalTo(2));
+                assertThat(response.getHits().getAt(0).getMatchedQueries(), equalTo(new String[] { "all", "rescore_phrase" }));
+
+                assertThat(response.getHits().getAt(1).getMatchedQueries().length, equalTo(1));
+                assertThat(response.getHits().getAt(1).getMatchedQueries(), equalTo(new String[] { "all" }));
+            }
+        );
     }
 }
HighlightBuilder().field("name").highlighterType("test-custom")), + response -> assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")) + ); } public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception { @@ -58,44 +59,49 @@ public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception options.put("myFieldOption", "someValue"); highlightConfig.options(options); - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .highlighter(new HighlightBuilder().field(highlightConfig)) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myFieldOption:someValue")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).highlighter(new HighlightBuilder().field(highlightConfig)), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "name", 1, equalTo("field:myFieldOption:someValue")); + } + ); } public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception { Map options = new HashMap<>(); options.put("myGlobalOption", "someValue"); - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom").options(options)) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myGlobalOption:someValue")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .highlighter(new HighlightBuilder().field("name").highlighterType("test-custom").options(options)), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "name", 1, equalTo("field:myGlobalOption:someValue")); + } + ); } public void testThatCustomHighlighterReceivesFieldsInOrder() throws Exception { - SearchResponse searchResponse = prepareSearch("test").setQuery( - QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders.termQuery("name", "arbitrary")) - ) - .highlighter( - new HighlightBuilder().highlighterType("test-custom") - .field("name") - .field("other_name") - .field("other_other_name") - .useExplicitFieldOrder(true) + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders.termQuery("name", "arbitrary")) ) - .get(); - - assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 0, "other_name", 0, equalTo("standard response for other_name at position 2")); - assertHighlight(searchResponse, 0, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); - assertHighlight(searchResponse, 1, "name", 0, equalTo("standard response for name at position 1")); - assertHighlight(searchResponse, 1, "other_name", 0, equalTo("standard response for other_name at position 2")); - assertHighlight(searchResponse, 1, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + .highlighter( + new HighlightBuilder().highlighterType("test-custom") + .field("name") + .field("other_name") + .field("other_other_name") + 
.useExplicitFieldOrder(true) + ), + response -> { + assertHighlight(response, 0, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 0, "other_name", 0, equalTo("standard response for other_name at position 2")); + assertHighlight(response, 0, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + assertHighlight(response, 1, "name", 0, equalTo("standard response for name at position 1")); + assertHighlight(response, 1, "other_name", 0, equalTo("standard response for other_name at position 2")); + assertHighlight(response, 1, "other_other_name", 0, equalTo("standard response for other_other_name at position 3")); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 79a28a053b3c2..5c189c0c6c96a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoPoint; @@ -94,7 +93,9 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNotHighlighted; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -128,24 +129,25 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio .endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().array("tags", "foo bar", "foo bar", "foo bar", "foo baz").field("sort", 1).endObject()) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().array("tags", "foo baz", "foo baz", "foo baz", "foo bar").field("sort", 2).endObject()) .get(); refresh(); for (BoundaryScannerType scanner : BoundaryScannerType.values()) { - SearchResponse search = prepareSearch().addSort(SortBuilders.fieldSort("sort")) - .setQuery(matchQuery("tags", "foo bar")) - .highlighter(new HighlightBuilder().field(new Field("tags")).numOfFragments(2).boundaryScannerType(scanner)) - .get(); - assertHighlight(search, 0, "tags", 0, 2, equalTo("foo bar")); - assertHighlight(search, 0, "tags", 1, 2, equalTo("foo bar")); - assertHighlight(search, 1, "tags", 0, 1, equalTo("foo bar")); + assertResponse( + 
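The highlighter conversions below follow one further convention worth noting: when the old code made a single assertion against the response, the new code collapses to an expression lambda, and only multi-assertion bodies get a block lambda. A hedged sketch of both shapes, with made-up index and field names (the `<em>` tags assume the default highlighter markup):

```java
// One assertion: expression lambda, no braces.
assertResponse(
    prepareSearch("idx").setQuery(matchQuery("text", "foo")).highlighter(new HighlightBuilder().field("text")),
    response -> assertHighlight(response, 0, "text", 0, equalTo("<em>foo</em>"))
);

// Several assertions: block lambda, as in the hunks above and below.
assertResponse(
    prepareSearch("idx").setQuery(matchQuery("text", "foo"))
        .highlighter(new HighlightBuilder().field("text").field("title")),
    response -> {
        assertHighlight(response, 0, "text", 0, equalTo("<em>foo</em>"));
        assertHighlight(response, 0, "title", 0, equalTo("<em>foo</em> in a title"));
    }
);
```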
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
index 79a28a053b3c2..5c189c0c6c96a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
@@ -19,7 +19,6 @@
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.geo.GeoPoint;
@@ -94,7 +93,9 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNotHighlighted;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
@@ -128,24 +129,25 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio
             .endObject();
         mappings.endObject();
         assertAcked(prepareCreate("test").setMapping(mappings));
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(jsonBuilder().startObject().array("tags", "foo bar", "foo bar", "foo bar", "foo baz").field("sort", 1).endObject())
             .get();
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
             .setSource(jsonBuilder().startObject().array("tags", "foo baz", "foo baz", "foo baz", "foo bar").field("sort", 2).endObject())
             .get();
         refresh();
         for (BoundaryScannerType scanner : BoundaryScannerType.values()) {
-            SearchResponse search = prepareSearch().addSort(SortBuilders.fieldSort("sort"))
-                .setQuery(matchQuery("tags", "foo bar"))
-                .highlighter(new HighlightBuilder().field(new Field("tags")).numOfFragments(2).boundaryScannerType(scanner))
-                .get();
-            assertHighlight(search, 0, "tags", 0, 2, equalTo("foo bar"));
-            assertHighlight(search, 0, "tags", 1, 2, equalTo("foo bar"));
-            assertHighlight(search, 1, "tags", 0, 1, equalTo("foo bar"));
+            assertResponse(
+                prepareSearch().addSort(SortBuilders.fieldSort("sort"))
+                    .setQuery(matchQuery("tags", "foo bar"))
+                    .highlighter(new HighlightBuilder().field(new Field("tags")).numOfFragments(2).boundaryScannerType(scanner)),
+                response -> {
+                    assertHighlight(response, 0, "tags", 0, 2, equalTo("foo bar"));
+                    assertHighlight(response, 0, "tags", 1, 2, equalTo("foo bar"));
+                    assertHighlight(response, 1, "tags", 0, 1, equalTo("foo bar"));
+                }
+            );
         }
     }
 
@@ -162,12 +164,12 @@ public void testHighlightingWithStoredKeyword() throws IOException {
             .endObject();
         mappings.endObject();
         assertAcked(prepareCreate("test").setMapping(mappings));
-        client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get();
+        prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get();
         refresh();
-        SearchResponse search = prepareSearch().setQuery(matchQuery("text", "foo"))
-            .highlighter(new HighlightBuilder().field(new Field("text")))
-            .get();
-        assertHighlight(search, 0, "text", 0, equalTo("foo"));
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("text", "foo")).highlighter(new HighlightBuilder().field(new Field("text"))),
+            response -> assertHighlight(response, 0, "text", 0, equalTo("foo"))
+        );
     }
 
     public void testHighlightingWithWildcardName() throws IOException {
@@ -186,13 +188,14 @@ public void testHighlightingWithWildcardName() throws IOException {
             .endObject();
         mappings.endObject();
         assertAcked(prepareCreate("test").setMapping(mappings));
-        client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get();
+        prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get();
         refresh();
         for (String type : ALL_TYPES) {
-            SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text")))
-                .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type)))
-                .get();
-            assertHighlight(search, 0, "text", 0, equalTo("text"));
+            assertResponse(
+                prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text")))
+                    .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))),
+                response -> assertHighlight(response, 0, "text", 0, equalTo("text"))
+            );
         }
     }
 
@@ -214,14 +217,16 @@ public void testFieldAlias() throws IOException {
             .endObject();
         assertAcked(prepareCreate("test").setMapping(mappings));
 
-        client().prepareIndex("test").setId("1").setSource("text", "foo").get();
+        prepareIndex("test").setId("1").setSource("text", "foo").get();
         refresh();
 
         for (String type : ALL_TYPES) {
             HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type))
                 .requireFieldMatch(randomBoolean());
-            SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get();
-            assertHighlight(search, 0, "alias", 0, equalTo("foo"));
+            assertResponse(
+                prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder),
+                response -> assertHighlight(response, 0, "alias", 0, equalTo("foo"))
+            );
         }
     }
 
@@ -244,14 +249,16 @@ public void testFieldAliasWithSourceLookup() throws IOException {
             .endObject();
         assertAcked(prepareCreate("test").setMapping(mappings));
 
-        client().prepareIndex("test").setId("1").setSource("text", "foo bar").get();
+        prepareIndex("test").setId("1").setSource("text", "foo bar").get();
         refresh();
 
         for (String type : ALL_TYPES) {
             HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type))
                 .requireFieldMatch(randomBoolean());
-            SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "bar")).highlighter(builder).get();
-            assertHighlight(search, 0, "alias", 0, equalTo("foo bar"));
+            assertResponse(
+                prepareSearch().setQuery(matchQuery("alias", "bar")).highlighter(builder),
+                response -> assertHighlight(response, 0, "alias", 0, equalTo("foo bar"))
+            );
         }
     }
 
@@ -271,12 +278,14 @@ public void testFieldAliasWithWildcardField() throws IOException {
             .endObject();
         assertAcked(prepareCreate("test").setMapping(mappings));
 
-        client().prepareIndex("test").setId("1").setSource("keyword", "foo").get();
+        prepareIndex("test").setId("1").setSource("keyword", "foo").get();
         refresh();
 
         HighlightBuilder builder = new HighlightBuilder().field(new Field("al*")).requireFieldMatch(false);
-        SearchResponse search = prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get();
-        assertHighlight(search, 0, "alias", 0, equalTo("foo"));
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder),
+            response -> assertHighlight(response, 0, "alias", 0, equalTo("foo"))
+        );
     }
 
     public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException {
@@ -303,21 +312,21 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc
             .endObject();
         mappings.endObject();
         assertAcked(prepareCreate("test").setMapping(mappings));
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(jsonBuilder().startObject().field("unstored_text", "text").field("text", "text").endObject())
             .get();
         refresh();
         for (String type : ALL_TYPES) {
-            SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text")))
-                .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type)))
-                .get();
-            assertHighlight(search, 0, "text", 0, equalTo("text"));
-            search = prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text")))
-                .highlighter(new HighlightBuilder().field(new Field("unstored_text")))
-                .get();
-            assertNoFailures(search);
-            assertThat(search.getHits().getAt(0).getHighlightFields().size(), equalTo(0));
+            assertResponse(
+                prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text")))
+                    .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(type))),
+                response -> assertHighlight(response, 0, "text", 0, equalTo("text"))
+            );
+            assertNoFailuresAndResponse(
+                prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text")))
+                    .highlighter(new HighlightBuilder().field(new Field("unstored_text"))),
+                response -> assertThat(response.getHits().getAt(0).getHighlightFields().size(), equalTo(0))
+            );
         }
     }
 
@@ -328,12 +337,14 @@ public void testHighTermFrequencyDoc() throws IOException {
         for (int i = 0; i < 6000; i++) {
             builder.append("abc").append(" ");
         }
-        client().prepareIndex("test").setId("1").setSource("name", builder.toString()).get();
+        prepareIndex("test").setId("1").setSource("name", builder.toString()).get();
         refresh();
-        SearchResponse search = prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc")))
-            .highlighter(new HighlightBuilder().field("name"))
-            .get();
-        assertHighlight(search, 0, "name", 0, startsWith("abc abc abc abc"));
+        assertResponse(
+            prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))).highlighter(new HighlightBuilder().field("name")),
+            response -> {
+                assertHighlight(response, 0, "name", 0, startsWith("abc abc abc abc"));
+            }
+        );
     }
 
     public void testEnsureNoNegativeOffsets() throws Exception {
@@ -346,8 +357,7 @@ public void testEnsureNoNegativeOffsets() throws Exception {
             )
         );
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 "no_long_term",
                 "This is a test where foo is highlighed and should be highlighted",
@@ -414,8 +424,7 @@ public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception
 
         IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
         for (int i = 0; i < indexRequestBuilders.length; i++) {
-            indexRequestBuilders[i] = client().prepareIndex("test")
-                .setId(Integer.toString(i))
+            indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i))
                 .setSource(
                     XContentFactory.jsonBuilder()
                         .startObject()
@@ -433,22 +442,31 @@
         }
         indexRandom(true, indexRequestBuilders);
 
-        SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug"))
-            .highlighter(new HighlightBuilder().field("title", -1, 0))
-            .get();
-
-        for (int i = 0; i < indexRequestBuilders.length; i++) {
-            assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch"));
-        }
-
-        search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment"))
-            .highlighter(new HighlightBuilder().field("attachments.body", -1, 0))
-            .get();
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)),
+            response -> {
+                for (int i = 0; i < indexRequestBuilders.length; i++) {
+                    assertHighlight(
+                        response,
+                        i,
+                        "title",
+                        0,
+                        equalTo("This is a test on the highlighting bug present in elasticsearch")
+                    );
+                }
+            }
+        );
 
-        for (int i = 0; i < indexRequestBuilders.length; i++) {
-            assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1"));
-            assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2"));
-        }
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("attachments.body", "attachment"))
+                .highlighter(new HighlightBuilder().field("attachments.body", -1, 0)),
+            response -> {
+                for (int i = 0; i < indexRequestBuilders.length; i++) {
+                    assertHighlight(response, i, "attachments.body", 0, equalTo("attachment 1"));
+                    assertHighlight(response, i, "attachments.body", 1, equalTo("attachment 2"));
+                }
+            }
+        );
     }
 
@@ -481,8 +499,7 @@ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exce
 
         IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
         for (int i = 0; i < indexRequestBuilders.length; i++) {
-            indexRequestBuilders[i] = client().prepareIndex("test")
-                .setId(Integer.toString(i))
+            indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i))
                 .setSource(
                     XContentFactory.jsonBuilder()
                         .startObject()
@@ -500,23 +517,32 @@ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exce
         }
         indexRandom(true, indexRequestBuilders);
 
-        SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug"))
-            .highlighter(new HighlightBuilder().field("title", -1, 0))
-            .get();
-
-        for (int i = 0; i < indexRequestBuilders.length; i++) {
-            assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch"));
-        }
-
-        search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment"))
-            .highlighter(new HighlightBuilder().field("attachments.body", -1, 2))
-            .execute()
-            .get();
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)),
+            response -> {
+                for (int i = 0; i < indexRequestBuilders.length; i++) {
+                    assertHighlight(
+                        response,
+                        i,
+                        "title",
+                        0,
+                        equalTo("This is a test on the highlighting bug present in elasticsearch")
+                    );
+                }
+            }
+        );
 
-        for (int i = 0; i < 5; i++) {
-            assertHighlight(search, i, "attachments.body", 0, equalTo("attachment 1"));
-            assertHighlight(search, i, "attachments.body", 1, equalTo("attachment 2"));
-        }
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("attachments.body", "attachment"))
+                .highlighter(new HighlightBuilder().field("attachments.body", -1, 2))
+                .execute(),
+            response -> {
+                for (int i = 0; i < 5; i++) {
+                    assertHighlight(response, i, "attachments.body", 0, equalTo("attachment 1"));
+                    assertHighlight(response, i, "attachments.body", 1, equalTo("attachment 2"));
+                }
+            }
+        );
     }
 
     public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Exception {
@@ -548,8 +574,7 @@ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Except
 
         IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
         for (int i = 0; i < indexRequestBuilders.length; i++) {
-            indexRequestBuilders[i] = client().prepareIndex("test")
-                .setId(Integer.toString(i))
+            indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i))
                 .setSource(
                     XContentFactory.jsonBuilder()
                         .startObject()
@@ -571,46 +596,52 @@ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Except
         }
         indexRandom(true, indexRequestBuilders);
 
-        SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug"))
-            // asking for the whole field to be highlighted
-            .highlighter(new HighlightBuilder().field("title", -1, 0))
-            .get();
-
-        for (int i = 0; i < indexRequestBuilders.length; i++) {
-            assertHighlight(
-                search,
-                i,
-                "title",
-                0,
-                equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.")
-            );
-            assertHighlight(search, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on."));
-        }
-
-        search = prepareSearch().setQuery(matchQuery("title", "bug"))
-            // sentences will be generated out of each value
-            .highlighter(new HighlightBuilder().field("title"))
-            .get();
-
-        for (int i = 0; i < indexRequestBuilders.length; i++) {
-            assertHighlight(
-                search,
-                i,
-                "title",
-                0,
-                equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.")
-            );
-            assertHighlight(search, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on."));
-        }
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("title", "bug"))
+                // asking for the whole field to be highlighted
+                .highlighter(new HighlightBuilder().field("title", -1, 0)),
+            response -> {
+                for (int i = 0; i < indexRequestBuilders.length; i++) {
+                    assertHighlight(
+                        response,
+                        i,
+                        "title",
+                        0,
+                        equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.")
+                    );
+                    assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on."));
+                }
+            }
+        );
 
-        search = prepareSearch().setQuery(matchQuery("attachments.body", "attachment"))
-            .highlighter(new HighlightBuilder().field("attachments.body", -1, 2))
-            .get();
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("title", "bug"))
+                // sentences will be generated out of each value
+                .highlighter(new HighlightBuilder().field("title")),
+            response -> {
+                for (int i = 0; i < indexRequestBuilders.length; i++) {
+                    assertHighlight(
+                        response,
+                        i,
+                        "title",
+                        0,
+                        equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.")
+                    );
+                    assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on."));
+                }
+            }
+        );
 
-        for (int i = 0; i < indexRequestBuilders.length; i++) {
-            assertHighlight(search, i, "attachments.body", 0, equalTo("attachment for this test"));
-            assertHighlight(search, i, "attachments.body", 1, 2, equalTo("attachment 2"));
-        }
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("attachments.body", "attachment"))
+                .highlighter(new HighlightBuilder().field("attachments.body", -1, 2)),
+            response -> {
+                for (int i = 0; i < indexRequestBuilders.length; i++) {
+                    assertHighlight(response, i, "attachments.body", 0, equalTo("attachment for this test"));
+                    assertHighlight(response, i, "attachments.body", 1, 2, equalTo("attachment 2"));
+                }
+            }
+        );
     }
 
     public void testHighlightIssue1994() throws Exception {
@@ -624,42 +655,54 @@ public void testHighlightIssue1994() throws Exception {
         );
 
         String[] titles = new String[] { "This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us" };
-        indexRandom(false, client().prepareIndex("test").setId("1").setSource("title", titles, "titleTV", titles));
+        indexRandom(false, prepareIndex("test").setId("1").setSource("title", titles, "titleTV", titles));
 
         indexRandom(
             true,
-            client().prepareIndex("test").setId("2").setSource("titleTV", new String[] { "some text to highlight", "highlight other text" })
+            prepareIndex("test").setId("2").setSource("titleTV", new String[] { "some text to highlight", "highlight other text" })
         );
 
-        SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug"))
-            .highlighter(new HighlightBuilder().field("title", -1, 2).field("titleTV", -1, 2).requireFieldMatch(false))
-            .get();
-
-        assertHighlight(search, 0, "title", 0, equalTo("This is a test on the highlighting bug present in elasticsearch"));
-        assertHighlight(search, 0, "title", 1, 2, equalTo("The bug is bugging us"));
-        assertHighlight(search, 0, "titleTV", 0, equalTo("This is a test on the highlighting bug present in elasticsearch"));
-        assertHighlight(search, 0, "titleTV", 1, 2, equalTo("The bug is bugging us"));
-
-        search = prepareSearch().setQuery(matchQuery("titleTV", "highlight"))
-            .highlighter(new HighlightBuilder().field("titleTV", -1, 2))
-            .get();
-
-        assertHighlight(search, 0, "titleTV", 0, equalTo("some text to highlight"));
-        assertHighlight(search, 0, "titleTV", 1, 2, equalTo("highlight other text"));
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("title", "bug"))
+                .highlighter(new HighlightBuilder().field("title", -1, 2).field("titleTV", -1, 2).requireFieldMatch(false)),
+            response -> {
+                assertHighlight(
+                    response,
+                    0,
+                    "title",
+                    0,
+                    equalTo("This is a test on the highlighting bug present in elasticsearch")
+                );
+                assertHighlight(response, 0, "title", 1, 2, equalTo("The bug is bugging us"));
+                assertHighlight(
+                    response,
+                    0,
+                    "titleTV",
+                    0,
+                    equalTo("This is a test on the highlighting bug present in elasticsearch")
+                );
+                assertHighlight(response, 0, "titleTV", 1, 2, equalTo("The bug is bugging us"));
+            }
+        );
+        assertResponse(
+            prepareSearch().setQuery(matchQuery("titleTV", "highlight")).highlighter(new HighlightBuilder().field("titleTV", -1, 2)),
+            response -> {
+                assertHighlight(response, 0, "titleTV", 0, equalTo("some text to highlight"));
+                assertHighlight(response, 0, "titleTV", 1, 2, equalTo("highlight other text"));
+            }
+        );
     }
 
     public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() {
         createIndex("test");
         ensureGreen();
 
-        client().prepareIndex("test")
-            .setSource(
-                "field1",
-                new String[] { "this is a test", "this is the second test" },
-                "field2",
-                new String[] { "this is another test", "yet another test" }
-            )
-            .get();
+        prepareIndex("test").setSource(
+            "field1",
+            new String[] { "this is a test", "this is the second test" },
+            "field2",
+            new String[] { "this is another test", "yet another test" }
+        ).get();
         refresh();
 
         logger.info("--> highlighting and searching on field1 and field2 produces different tags");
@@ -679,11 +722,11 @@ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() {
             )
         );
 
-        SearchResponse searchResponse = prepareSearch("test").setSource(source).get();
-
-        assertHighlight(searchResponse, 0, "field1", 0, 2, equalTo("test"));
-        assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo("test"));
-        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("yet another test"));
+        assertResponse(prepareSearch("test").setSource(source), response -> {
+            assertHighlight(response, 0, "field1", 0, 2, equalTo("test"));
+            assertHighlight(response, 0, "field1", 1, 2, equalTo("test"));
+            assertHighlight(response, 0, "field2", 0, 1, equalTo("yet another test"));
+        });
    }
 
     // Issue #5175
@@ -700,16 +743,14 @@ public void testHighlightingOnWildcardFields() throws Exception {
         );
         ensureGreen();
 
-        client().prepareIndex("test")
-            .setSource(
-                "field-postings",
-                "This is the first test sentence. Here is the second one.",
-                "field-fvh",
-                "This is the test with term_vectors",
-                "field-plain",
-                "This is the test for the plain highlighter"
-            )
-            .get();
+        prepareIndex("test").setSource(
+            "field-postings",
+            "This is the first test sentence. Here is the second one.",
+            "field-fvh",
+            "This is the test with term_vectors",
+            "field-plain",
+            "This is the test for the plain highlighter"
+        ).get();
         refresh();
 
         logger.info("--> highlighting and searching on field*");
@@ -718,24 +759,24 @@ public void testHighlightingOnWildcardFields() throws Exception {
             .query(termQuery("field-postings", "test"))
             .highlighter(highlight().field("field*").preTags("").postTags("").requireFieldMatch(false));
 
-        SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet();
-
-        assertHighlight(
-            searchResponse,
-            0,
-            "field-postings",
-            0,
-            1,
-            equalTo("This is the first test sentence. Here is the second one.")
-        );
-        assertHighlight(searchResponse, 0, "field-fvh", 0, 1, equalTo("This is the test with term_vectors"));
-        assertHighlight(searchResponse, 0, "field-plain", 0, 1, equalTo("This is the test for the plain highlighter"));
+        assertResponse(client().search(new SearchRequest("test").source(source)), response -> {
+            assertHighlight(
+                response,
+                0,
+                "field-postings",
+                0,
+                1,
+                equalTo("This is the first test sentence. Here is the second one.")
+            );
+            assertHighlight(response, 0, "field-fvh", 0, 1, equalTo("This is the test with term_vectors"));
+            assertHighlight(response, 0, "field-plain", 0, 1, equalTo("This is the test for the plain highlighter"));
+        });
     }
 
     public void testPlainHighlighter() throws Exception {
         ensureGreen();
 
-        client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+        prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
         refresh();
 
         SearchSourceBuilder source = searchSource().query(termQuery("field1", "test"))
@@ -746,8 +787,7 @@ public void testPlainHighlighterOrder() throws Exception {
         ensureGreen();
 
-        client().prepareIndex("test")
-            .setSource("field1", "The quick brown fox jumps over the lazy brown dog but to no suprise the dog doesn't care")
+        prepareIndex("test").setSource("field1", "The quick brown fox jumps over the lazy brown dog but to no suprise the dog doesn't care")
             .get();
         refresh();
 
@@ -756,23 +796,23 @@ public void testPlainHighlighterOrder() throws Exception {
             SearchSourceBuilder source = searchSource().query(matchQuery("field1", "brown dog"))
                 .highlighter(highlight().highlighterType("plain").field("field1").preTags("").postTags("").fragmentSize(25));
 
-            SearchResponse searchResponse = prepareSearch("test").setSource(source).get();
-
-            assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo("The quick brown fox"));
-            assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog"));
-            assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care"));
+            assertResponse(prepareSearch("test").setSource(source), response -> {
+                assertHighlight(response, 0, "field1", 0, 3, equalTo("The quick brown fox"));
+                assertHighlight(response, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog"));
+                assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care"));
+            });
 
             // lets be explicit about the order
             source = searchSource().query(matchQuery("field1", "brown dog"))
                 .highlighter(
                     highlight().highlighterType("plain").field("field1").order("none").preTags("").postTags("").fragmentSize(25)
                 );
-            searchResponse = prepareSearch("test").setSource(source).get();
-
-            assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo("The quick brown fox"));
-            assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog"));
-            assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care"));
+            assertResponse(prepareSearch("test").setSource(source), response -> {
+                assertHighlight(response, 0, "field1", 0, 3, equalTo("The quick brown fox"));
+                assertHighlight(response, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog"));
+                assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care"));
+            });
         }
         {
             // order by score
@@ -781,11 +821,11 @@ public void testPlainHighlighterOrder() throws Exception {
                 highlight().highlighterType("plain").order("score").field("field1").preTags("").postTags("").fragmentSize(25)
             );
 
-            SearchResponse searchResponse = prepareSearch("test").setSource(source).get();
-
-            assertHighlight(searchResponse, 0, "field1", 0, 3, equalTo(" jumps over the lazy brown dog"));
-            assertHighlight(searchResponse, 0, "field1", 1, 3, equalTo("The quick brown fox"));
-            assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo(" dog doesn't care"));
+            assertResponse(prepareSearch("test").setSource(source), response -> {
+                assertHighlight(response, 0, "field1", 0, 3, equalTo(" jumps over the lazy brown dog"));
+                assertHighlight(response, 0, "field1", 1, 3, equalTo("The quick brown fox"));
+                assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care"));
+            });
         }
     }
 
@@ -795,7 +835,7 @@ public void testFastVectorHighlighter() throws Exception {
 
         indexRandom(
             true,
-            client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
+            prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
         );
 
         logger.info("--> highlighting and searching on field1");
@@ -821,10 +861,7 @@ public void testHighlighterWithSentenceBoundaryScanner() throws Exception {
         assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping()));
         ensureGreen();
 
-        indexRandom(
-            true,
-            client().prepareIndex("test").setSource("field1", "A sentence with few words. Another sentence with even more words.")
-        );
+        indexRandom(true, prepareIndex("test").setSource("field1", "A sentence with few words. Another sentence with even more words."));
 
         for (String type : new String[] { "unified", "fvh" }) {
             logger.info("--> highlighting and searching on 'field' with sentence boundary_scanner");
@@ -836,25 +873,25 @@ public void testHighlighterWithSentenceBoundaryScanner() throws Exception {
                         .postTags("")
                         .boundaryScannerType(BoundaryScannerType.SENTENCE)
                 );
-            SearchResponse searchResponse = prepareSearch("test").setSource(source).get();
-
-            assertHighlight(
-                searchResponse,
-                0,
-                "field1",
-                0,
-                2,
-                anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. "))
-            );
+            assertResponse(prepareSearch("test").setSource(source), response -> {
+                assertHighlight(
+                    response,
+                    0,
+                    "field1",
+                    0,
+                    2,
+                    anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. "))
+                );
 
-            assertHighlight(
-                searchResponse,
-                0,
-                "field1",
-                1,
-                2,
-                anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. "))
-            );
+                assertHighlight(
+                    response,
+                    0,
+                    "field1",
+                    1,
+                    2,
+                    anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. "))
+                );
+            });
         }
     }
 
@@ -862,10 +899,7 @@ public void testHighlighterWithSentenceBoundaryScannerAndLocale() throws Excepti
         assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping()));
         ensureGreen();
 
-        indexRandom(
-            true,
-            client().prepareIndex("test").setSource("field1", "A sentence with few words. Another sentence with even more words.")
-        );
+        indexRandom(true, prepareIndex("test").setSource("field1", "A sentence with few words. Another sentence with even more words."));
 
         for (String type : new String[] { "fvh", "unified" }) {
             logger.info("--> highlighting and searching on 'field' with sentence boundary_scanner");
@@ -879,25 +913,25 @@ public void testHighlighterWithSentenceBoundaryScannerAndLocale() throws Excepti
                         .boundaryScannerLocale(Locale.ENGLISH.toLanguageTag())
                 );
 
-            SearchResponse searchResponse = prepareSearch("test").setSource(source).get();
-
-            assertHighlight(
-                searchResponse,
-                0,
-                "field1",
-                0,
-                2,
-                anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. "))
-            );
+            assertResponse(prepareSearch("test").setSource(source), response -> {
+                assertHighlight(
+                    response,
+                    0,
+                    "field1",
+                    0,
+                    2,
+                    anyOf(equalTo("A sentence with few words"), equalTo("A sentence with few words. 
")) + ); - assertHighlight( - searchResponse, - 0, - "field1", - 1, - 2, - anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) - ); + assertHighlight( + response, + 0, + "field1", + 1, + 2, + anyOf(equalTo("Another sentence with"), equalTo("Another sentence with even more words. ")) + ); + }); } } @@ -905,7 +939,7 @@ public void testHighlighterWithWordBoundaryScanner() throws Exception { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setSource("field1", "some quick and hairy brown:fox jumped over the lazy dog")); + indexRandom(true, prepareIndex("test").setSource("field1", "some quick and hairy brown:fox jumped over the lazy dog")); logger.info("--> highlighting and searching on 'field' with word boundary_scanner"); for (String type : new String[] { "unified", "fvh" }) { @@ -933,7 +967,7 @@ public void testHighlighterWithWordBoundaryScannerAndLocale() throws Exception { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setSource("field1", "some quick and hairy brown:fox jumped over the lazy dog")); + indexRandom(true, prepareIndex("test").setSource("field1", "some quick and hairy brown:fox jumped over the lazy dog")); for (String type : new String[] { "unified", "fvh" }) { SearchSourceBuilder source = searchSource().query(termQuery("field1", "some")) @@ -968,15 +1002,19 @@ public void testFVHManyMatches() throws Exception { // Index one megabyte of "t " over and over and over again String pattern = "t "; String value = new String(new char[1024 * 256 / pattern.length()]).replace("\0", pattern); - client().prepareIndex("test").setSource("field1", value).get(); + prepareIndex("test").setSource("field1", value).get(); refresh(); + final long[] tookDefaultPhrase = new long[1]; + final long[] tookLargePhrase = new long[1]; + logger.info("--> highlighting and searching on field1 with default phrase limit"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "t")) .highlighter(highlight().highlighterType("fvh").field("field1", 20, 1).order("score").preTags("").postTags("")); - SearchResponse defaultPhraseLimit = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(defaultPhraseLimit, 0, "field1", 0, 1, containsString("t")); - + assertResponse(client().search(new SearchRequest("test").source(source)), defaultPhraseLimit -> { + assertHighlight(defaultPhraseLimit, 0, "field1", 0, 1, containsString("t")); + tookDefaultPhrase[0] = defaultPhraseLimit.getTook().getMillis(); + }); logger.info("--> highlighting and searching on field1 with large phrase limit"); source = searchSource().query(termQuery("field1", "t")) .highlighter( @@ -987,15 +1025,16 @@ public void testFVHManyMatches() throws Exception { .postTags("") .phraseLimit(30000) ); - SearchResponse largePhraseLimit = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHighlight(largePhraseLimit, 0, "field1", 0, 1, containsString("t")); - + assertResponse(client().search(new SearchRequest("test").source(source)), largePhraseLimit -> { + assertHighlight(largePhraseLimit, 0, "field1", 0, 1, containsString("t")); + tookLargePhrase[0] = largePhraseLimit.getTook().getMillis(); + }); /* * I hate comparing times because it can be inconsistent but default is * in the neighborhood of 300ms and the large phrase limit is in the * neighborhood of 8 seconds. 
*/ - assertThat(defaultPhraseLimit.getTook().getMillis(), lessThan(largePhraseLimit.getTook().getMillis())); + assertThat(tookDefaultPhrase[0], lessThan(tookLargePhrase[0])); } public void testMatchedFieldsFvhRequireFieldMatch() throws Exception { @@ -1071,12 +1110,16 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception SearchRequestBuilder req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); // First check highlighting without any matched fields set - SearchResponse resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // And that matching a subfield doesn't automatically highlight it - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Add the subfield to the list of matched fields but don't match it. Everything should still work // like before we added it. @@ -1087,12 +1130,16 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo", "foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now make half the matches come from the stored field and half from just a matched field. - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now remove the stored field from the matched field list. That should work too. fooField = new Field("foo").numOfFragments(1) @@ -1102,8 +1149,10 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now make sure boosted fields don't blow up when matched fields is both the subfield and stored field. 
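// (Aside on the helper this whole refactor converges on: assertResponse
// executes the request, hands the response to the consumer, and releases
// the response afterwards, so a test can no longer leak an unclosed
// SearchResponse. A minimal sketch of such a helper, assuming the
// ActionRequestBuilder API and a ref-counted response type -- hedged, not
// necessarily the exact framework signature; the overload taking an
// ActionFuture, as used with client().search(...) elsewhere in this file,
// would look the same from the try block onwards:
//
//     static <R extends ActionResponse> void assertResponse(ActionRequestBuilder<?, R> builder, Consumer<R> consumer) {
//         R response = builder.get();        // execute the request
//         try {
//             consumer.accept(response);     // run the caller's assertions
//         } finally {
//             response.decRef();             // always release the response
//         }
//     }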
fooField = new Field("foo").numOfFragments(1) @@ -1113,28 +1162,40 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .requireFieldMatch(requireFieldMatch); fooField.matchedFields("foo", "foo.plain"); req = prepareSearch("test").highlighter(new HighlightBuilder().field(fooField)); - resp = req.setQuery(queryStringQuery("foo.plain:running^5 scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running^5 scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Now just all matches are against the matched field. This still returns highlighting. - resp = req.setQuery(queryStringQuery("foo.plain:running foo.plain:scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("foo.plain:running foo.plain:scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // And all matched field via the queryString's field parameter, just in case - resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo.plain")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // Finding the same string two ways is ok too - resp = req.setQuery(queryStringQuery("run foo.plain:running^5 scissors").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); + assertResponse( + req.setQuery(queryStringQuery("run foo.plain:running^5 scissors").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")) + ); // But we use the best found score when sorting fragments - resp = req.setQuery(queryStringQuery("cats foo.plain:cats^5").field("foo")).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertResponse( + req.setQuery(queryStringQuery("cats foo.plain:cats^5").field("foo")), + response -> assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")) + ); // which can also be written by searching on the subfield - resp = req.setQuery(queryStringQuery("cats").field("foo").field("foo.plain", 5)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertResponse( + req.setQuery(queryStringQuery("cats").field("foo").field("foo.plain", 5)), + response -> assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")) + ); // Speaking of two fields, you can have two fields, only one of which has matchedFields enabled QueryBuilder twoFieldsQuery = queryStringQuery("cats").field("foo").field("foo.plain", 5).field("bar").field("bar.plain", 5); @@ -1143,50 +1204,63 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception .fragmentSize(25) .highlighterType("fvh") .requireFieldMatch(requireFieldMatch); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), 
response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + }); // And you can enable matchedField highlighting on both barField.matchedFields("bar", "bar.plain"); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("junk junk cats junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("junk junk cats junk junk")); + }); // Setting a matchedField that isn't searched/doesn't exist is simply ignored. barField.matchedFields("bar", "candy"); - resp = req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)).get(); - assertHighlight(resp, 0, "foo", 0, equalTo("junk junk cats junk junk")); - assertHighlight(resp, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + assertResponse(req.setQuery(twoFieldsQuery).highlighter(new HighlightBuilder().field(fooField).field(barField)), response -> { + assertHighlight(response, 0, "foo", 0, equalTo("junk junk cats junk junk")); + assertHighlight(response, 0, "bar", 0, equalTo("cat cat junk junk junk junk")); + }); // If the stored field doesn't have a value it doesn't matter what you match, you get nothing. barField.matchedFields("bar", "foo.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain").field("bar")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo.plain").field("bar")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")); + assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + } + ); // If the stored field is found but the matched field isn't then you don't get a result either. 
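// (Background for the matched-fields cases in this test: the fvh
// highlighter can blend matches from several differently-analyzed views
// of one stored field, e.g.
//
//     new HighlightBuilder.Field("foo").highlighterType("fvh").matchedFields("foo", "foo.plain")
//
// folds hits on the foo.plain sub-field into the fragments built for foo.
// This works off term vectors, which is why the fields involved in this
// test are mapped with term_vector=with_positions_offsets.)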
fooField.matchedFields("bar.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("foo"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("foo"))) + ); // But if you add the stored field to the list of matched fields then you'll get a result again fooField.matchedFields("foo", "bar.plain"); - resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("running with scissors")); - assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + assertResponse( + req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("running with scissors")); + assertThat(response.getHits().getAt(0).getHighlightFields(), not(hasKey("bar"))); + } + ); // You _can_ highlight fields that aren't subfields of one another. - resp = req.setQuery(queryStringQuery("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")) - .highlighter(new HighlightBuilder().field(fooField).field(barField)) - .get(); - assertHighlight(resp, 0, "foo", 0, equalTo("weird")); - assertHighlight(resp, 0, "bar", 0, equalTo("result")); + assertResponse( + req.setQuery(queryStringQuery("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")) + .highlighter(new HighlightBuilder().field(fooField).field(barField)), + response -> { + assertHighlight(response, 0, "foo", 0, equalTo("weird")); + assertHighlight(response, 0, "bar", 0, equalTo("result")); + } + ); assertFailures( req.setQuery(queryStringQuery("result").field("foo").field("foo.plain").field("bar").field("bar.plain")), @@ -1202,21 +1276,24 @@ public void testFastVectorHighlighterManyDocs() throws Exception { int COUNT = between(20, 100); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT]; for (int i = 0; i < COUNT; i++) { - indexRequestBuilders[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "test " + i); + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)).setSource("field1", "test " + i); } logger.info("--> indexing docs"); indexRandom(true, indexRequestBuilders); logger.info("--> searching explicitly on field1 and highlighting on it"); - SearchResponse searchResponse = prepareSearch().setSize(COUNT) - .setQuery(termQuery("field1", "test")) - .highlighter(new HighlightBuilder().field("field1", 100, 0)) - .get(); - for (int i = 0; i < COUNT; i++) { - SearchHit hit = searchResponse.getHits().getHits()[i]; - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... 
- assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("test " + hit.getId())); - } + assertResponse( + prepareSearch().setSize(COUNT) + .setQuery(termQuery("field1", "test")) + .highlighter(new HighlightBuilder().field("field1", 100, 0)), + response -> { + for (int i = 0; i < COUNT; i++) { + SearchHit hit = response.getHits().getHits()[i]; + // LUCENE 3.1 UPGRADE: Caused adding the space at the end... + assertHighlight(response, i, "field1", 0, 1, equalTo("test " + hit.getId())); + } + } + ); } public XContentBuilder type1TermVectorMapping() throws IOException { @@ -1242,19 +1319,26 @@ public void testSameContent() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test on the highlighting bug present in elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", -1, 0)) - .get(); - - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting bug present in elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")).highlighter(new HighlightBuilder().field("title", -1, 0)), + response -> { + for (int i = 0; i < 5; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test on the highlighting bug present in elasticsearch") + ); + } + } + ); } public void testFastVectorHighlighterOffsetParameter() throws Exception { @@ -1262,20 +1346,21 @@ public void testFastVectorHighlighterOffsetParameter() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test on the highlighting bug present in elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "bug")) - .highlighter(new HighlightBuilder().field("title", 30, 1, 10).highlighterType("fvh")) - .get(); - - for (int i = 0; i < 5; i++) { - // LUCENE 3.1 UPGRADE: Caused adding the space at the end... - assertHighlight(search, i, "title", 0, 1, equalTo("highlighting bug present in elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "bug")) + .highlighter(new HighlightBuilder().field("title", 30, 1, 10).highlighterType("fvh")), + response -> { + for (int i = 0; i < 5; i++) { + // LUCENE 3.1 UPGRADE: Caused adding the space at the end... + assertHighlight(response, i, "title", 0, 1, equalTo("highlighting bug present in elasticsearch")); + } + } + ); } public void testEscapeHtml() throws Exception { @@ -1283,19 +1368,27 @@ public void testEscapeHtml() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a html escaping highlighting test for *&? 
elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1, 10)) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight(search, i, "title", 0, 1, startsWith("This is a html escaping highlighting test for *&?")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 50, 1, 10)), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + startsWith("This is a html escaping highlighting test for *&?") + ); + } + } + ); } public void testEscapeHtmlVector() throws Exception { @@ -1303,19 +1396,20 @@ public void testEscapeHtmlVector() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().encoder("html").field("title", 30, 1, 10).highlighterType("plain")) - .get(); - - for (int i = 0; i < 5; i++) { - assertHighlight(search, i, "title", 0, 1, equalTo(" highlighting test for *&? elasticsearch")); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")) + .highlighter(new HighlightBuilder().encoder("html").field("title", 30, 1, 10).highlighterType("plain")), + response -> { + for (int i = 0; i < 5; i++) { + assertHighlight(response, i, "title", 0, 1, equalTo(" highlighting test for *&? 
elasticsearch")); + } + } + ); } public void testMultiMapperVectorWithStore() throws Exception { @@ -1344,7 +1438,7 @@ public void testMultiMapperVectorWithStore() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -1397,7 +1491,7 @@ public void testMultiMapperVectorFromSource() throws Exception { ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -1450,7 +1544,7 @@ public void testMultiMapperNoVectorWithStore() throws Exception { ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -1502,7 +1596,7 @@ public void testMultiMapperNoVectorFromSource() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -1534,8 +1628,7 @@ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exceptio IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test for the enabling fast vector highlighter"); } indexRandom(true, indexRequestBuilders); @@ -1567,61 +1660,66 @@ public void testDisableFastVectorHighlighter() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724"); } indexRandom(true, indexRequestBuilders); - SearchResponse search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("fvh")) - .get(); + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("fvh")), + response -> { - for (int i = 0; i < indexRequestBuilders.length; i++) { - // Because of SOLR-3724 nothing is highlighted when FVH is used - assertNotHighlighted(search, i, "title"); - } + for (int i = 0; i < indexRequestBuilders.length; i++) { + // Because of SOLR-3724 nothing is highlighted when FVH is used + assertNotHighlighted(response, i, "title"); + } + } + ); // Using plain highlighter instead of FVH - search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("plain")) - .get(); - - for (int i = 
0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - 1, - equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") - ); - } + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("plain")), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") + ); + } + } + ); // Using plain highlighter instead of FVH on the field level - search = prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("title").highlighterType("plain")).highlighterType("plain") - ) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - search, - i, - "title", - 0, - 1, - equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") - ); - } + assertResponse( + prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) + .highlighter( + new HighlightBuilder().field(new HighlightBuilder.Field("title").highlighterType("plain")).highlighterType("plain") + ), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") + ); + } + } + ); } public void testFSHHighlightAllMvFragments() throws Exception { assertAcked(prepareCreate("test").setMapping("tags", "type=text,term_vector=with_positions_offsets")); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( "tags", new String[] { @@ -1631,25 +1729,27 @@ public void testFSHHighlightAllMvFragments() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("tags", "tag")) - .highlighter(new HighlightBuilder().field("tags", -1, 0).highlighterType("fvh")) - .get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("tags", "tag")) + .highlighter(new HighlightBuilder().field("tags", -1, 0).highlighterType("fvh")), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long and has the tag token near the end") + ); + } ); } public void testBoostingQuery() { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -1669,7 +1769,7 @@ public void testBoostingQuery() { public void testBoostingQueryTermVector() throws IOException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); - 
client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -1689,8 +1789,7 @@ public void testBoostingQueryTermVector() throws IOException { public void testPlainHighlightDifferentFragmenter() throws Exception { assertAcked(prepareCreate("test").setMapping("tags", "type=text")); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .array( @@ -1703,40 +1802,44 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { .get(); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) - .highlighter( - new HighlightBuilder().field( - new HighlightBuilder.Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("simple") - ) - ) - .get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long tag and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("simple") + ) + ), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long tag and has the tag token near the end") + ); + } ); - response = prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) - .highlighter( - new HighlightBuilder().field( - new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("span") - ) - ) - .get(); - - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long tag and has the tag token near the end") + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) + .highlighter( + new HighlightBuilder().field( + new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("span") + ) + ), + response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long tag and has the tag token near the end") + ); + } ); assertFailures( @@ -1758,14 +1861,18 @@ public void testPlainHighlighterMultipleFields() { indexDoc("test", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true)) - .field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) - ) - .get(); - assertHighlight(response, 0, "field1", 
0, 1, equalTo("The quick brown <1>fox")); - assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true) + ).field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) + ), + response -> { + assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + } + ); } public void testFastVectorHighlighterMultipleFields() { @@ -1782,31 +1889,36 @@ public void testFastVectorHighlighterMultipleFields() { indexDoc("test", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) - .highlighter( - new HighlightBuilder().field(new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true)) - .field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) - ) - .get(); - assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); - assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field1", "fox")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("field1").preTags("<1>").postTags("").requireFieldMatch(true) + ).field(new HighlightBuilder.Field("field2").preTags("<2>").postTags("").requireFieldMatch(false)) + ), + response -> { + assertHighlight(response, 0, "field1", 0, 1, equalTo("The quick brown <1>fox")); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); + } + ); } public void testMissingStoredField() throws Exception { assertAcked(prepareCreate("test").setMapping("highlight_field", "type=text,store=true")); ensureGreen(); - client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "highlight").endObject()).get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "highlight").endObject()).get(); refresh(); // This query used to fail when the field to highlight was absent - SearchResponse response = prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "highlight")) - .highlighter( - new HighlightBuilder().field( - new HighlightBuilder.Field("highlight_field").fragmentSize(-1).numOfFragments(1).fragmenter("simple") - ) - ) - .get(); - assertThat(response.getHits().getHits()[0].getHighlightFields().isEmpty(), equalTo(true)); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "highlight")) + .highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("highlight_field").fragmentSize(-1).numOfFragments(1).fragmenter("simple") + ) + ), + response -> assertThat(response.getHits().getHits()[0].getHighlightFields().isEmpty(), equalTo(true)) + ); } // Issue #3211 @@ -1831,8 +1943,7 @@ public void testNumericHighlighting() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource("text", "elasticsearch test", "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42) .get(); refresh(); @@ -1862,7 +1973,7 @@ public void testResetTwice() 
throws Exception { ).setMapping("text", "type=text,analyzer=my_analyzer") ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("text", "elasticsearch test").get(); + prepareIndex("test").setId("1").setSource("text", "elasticsearch test").get(); refresh(); // Mock tokenizer will throw an exception if it is resetted twice @@ -1891,22 +2002,19 @@ public void testHighlightUsesHighlightQuery() throws IOException { .highlighter(highlightBuilder); Matcher searchQueryMatcher = equalTo("Testing the highlight query feature"); - SearchResponse response = search.get(); - assertHighlight(response, 0, "text", 0, searchQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, searchQueryMatcher)); field = new HighlightBuilder.Field("text"); Matcher hlQueryMatcher = equalTo("Testing the highlight query feature"); field.highlightQuery(matchQuery("text", "query")); highlightBuilder = new HighlightBuilder().field(field); search = prepareSearch("test").setQuery(QueryBuilders.matchQuery("text", "testing")).highlighter(highlightBuilder); - response = search.get(); - assertHighlight(response, 0, "text", 0, hlQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, hlQueryMatcher)); // Make sure the highlightQuery is taken into account when it is set on the highlight context instead of the field highlightBuilder.highlightQuery(matchQuery("text", "query")); field.highlighterType(type).highlightQuery(null); - response = search.get(); - assertHighlight(response, 0, "text", 0, hlQueryMatcher); + assertResponse(search, response -> assertHighlight(response, 0, "text", 0, hlQueryMatcher)); } } @@ -2212,28 +2320,28 @@ public void testHighlightNoMatchSizeNumberOfFragments() { // if there's a match we only return the values with matches (whole value as number_of_fragments == 0) MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery("text", "third fifth"); field.highlighterType("plain"); - SearchResponse response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); field.highlighterType("fvh"); - response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); - + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); field.highlighterType("unified"); - response = prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. 
This is the fourth sentence.")); - assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + assertResponse(prepareSearch("test").setQuery(queryBuilder).highlighter(new HighlightBuilder().field(field)), response -> { + assertHighlight(response, 0, "text", 0, 2, equalTo("This is the third sentence. This is the fourth sentence.")); + assertHighlight(response, 0, "text", 1, 2, equalTo("This is the fifth sentence")); + }); } public void testPostingsHighlighter() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog") - .get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2312,8 +2420,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( "field1", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. " @@ -2329,24 +2436,22 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { SearchSourceBuilder source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(5).preTags("").postTags(""))); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - 2, - equalTo( - "The quick brown fox jumps over the lazy dog." - + " The lazy red fox jumps over the quick dog." - ) - ); - assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo("The quick brown dog jumps over the lazy fox.")); - - client().prepareIndex("test") - .setId("2") + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + assertHighlight( + response, + 0, + "field1", + 0, + 2, + equalTo( + "The quick brown fox jumps over the lazy dog." + + " The lazy red fox jumps over the quick dog." + ) + ); + assertHighlight(response, 0, "field1", 1, 2, equalTo("The quick brown dog jumps over the lazy fox.")); + }); + prepareIndex("test").setId("2") .setSource( "field1", new String[] { @@ -2360,39 +2465,40 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(0).preTags("").postTags(""))); - searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 2L); - - for (SearchHit searchHit : searchResponse.getHits()) { - if ("1".equals(searchHit.getId())) { - assertHighlight( - searchHit, - "field1", - 0, - 1, - equalTo( - "The quick brown fox jumps over the lazy dog. " - + "The lazy red fox jumps over the quick dog. " - + "The quick brown dog jumps over the lazy fox." - ) - ); - } else if ("2".equals(searchHit.getId())) { - assertHighlight( - searchHit, - "field1", - 0, - 3, - equalTo("The quick brown fox jumps over the lazy dog. 
Second sentence not finished") - ); - assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red fox jumps over the quick dog.")); - assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); - } else { - fail("Only hits with id 1 and 2 are returned"); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 2L); + + for (SearchHit searchHit : response.getHits()) { + if ("1".equals(searchHit.getId())) { + assertHighlight( + searchHit, + "field1", + 0, + 1, + equalTo( + "The quick brown fox jumps over the lazy dog. " + + "The lazy red fox jumps over the quick dog. " + + "The quick brown dog jumps over the lazy fox." + ) + ); + } else if ("2".equals(searchHit.getId())) { + assertHighlight( + searchHit, + "field1", + 0, + 3, + equalTo("The quick brown fox jumps over the lazy dog. Second sentence not finished") + ); + assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red fox jumps over the quick dog.")); + assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy fox.")); + } else { + fail("Only hits with id 1 and 2 are returned"); + } } - } + }); } - public void testMultiMatchQueryHighlight() throws IOException { + public void testMultiMatchQueryHighlight() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("_doc") @@ -2412,9 +2518,7 @@ public void testMultiMatchQueryHighlight() throws IOException { .endObject(); assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over") - .get(); + prepareIndex("test").setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over").get(); refresh(); final int iters = scaledRandomIntBetween(20, 30); for (int i = 0; i < iters; i++) { @@ -2429,22 +2533,23 @@ public void testMultiMatchQueryHighlight() throws IOException { .field(new Field("field1").requireFieldMatch(true).preTags("").postTags("")) ); logger.info("Running multi-match type: [{}] highlight with type: [{}]", matchQueryType, highlighterType); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - anyOf( - equalTo("The quick brown fox jumps over"), - equalTo("The quick brown fox jumps over") - ) - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 1L); + assertHighlight( + response, + 0, + "field1", + 0, + anyOf( + equalTo("The quick brown fox jumps over"), + equalTo("The quick brown fox jumps over") + ) + ); + }); } } - public void testCombinedFieldsQueryHighlight() throws IOException { + public void testCombinedFieldsQueryHighlight() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("_doc") @@ -2465,9 +2570,7 @@ public void testCombinedFieldsQueryHighlight() throws IOException { assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over") - .get(); + prepareIndex("test").setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over").get(); refresh(); for (String highlighterType : 
ALL_TYPES) { @@ -2478,15 +2581,16 @@ public void testCombinedFieldsQueryHighlight() throws IOException { .field(new Field("field1").requireFieldMatch(true).preTags("").postTags("")) ); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); - assertHighlight( - searchResponse, - 0, - "field1", - 0, - equalTo("The quick brown fox jumps over") - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + assertHitCount(response, 1L); + assertHighlight( + response, + 0, + "field1", + 0, + equalTo("The quick brown fox jumps over") + ); + }); } } @@ -2494,49 +2598,47 @@ public void testPostingsHighlighterOrderByScore() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource( - "field1", - new String[] { - "This sentence contains one match, not that short. This sentence contains two sentence matches. " - + "This one contains no matches.", - "This is the second value's first sentence. This one contains no matches. " - + "This sentence contains three sentence occurrences (sentence).", - "One sentence match here and scored lower since the text is quite long, not that appealing. " - + "This one contains no matches." } - ) - .get(); + prepareIndex("test").setSource( + "field1", + new String[] { + "This sentence contains one match, not that short. This sentence contains two sentence matches. " + + "This one contains no matches.", + "This is the second value's first sentence. This one contains no matches. " + + "This sentence contains three sentence occurrences (sentence).", + "One sentence match here and scored lower since the text is quite long, not that appealing. " + + "This one contains no matches." } + ).get(); refresh(); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "sentence")) .highlighter(highlight().field("field1").order("score")); - SearchResponse searchResponse = client().search(new SearchRequest("test").source(source)).actionGet(); - - Map highlightFieldMap = searchResponse.getHits().getAt(0).getHighlightFields(); - assertThat(highlightFieldMap.size(), equalTo(1)); - HighlightField field1 = highlightFieldMap.get("field1"); - assertThat(field1.fragments().length, equalTo(4)); - assertThat( - field1.fragments()[0].string(), - equalTo("This sentence contains three sentence occurrences (sentence).") - ); - assertThat( - field1.fragments()[1].string(), - equalTo( - "This sentence contains one match, not that short. " - + "This sentence contains two sentence matches." - ) - ); - assertThat( - field1.fragments()[2].string(), - equalTo("This is the second value's first sentence. 
This one contains no matches.") - ); - assertThat( - field1.fragments()[3].string(), - equalTo("One sentence match here and scored lower since the text is quite long, not that appealing.") - ); + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + Map highlightFieldMap = response.getHits().getAt(0).getHighlightFields(); + assertThat(highlightFieldMap.size(), equalTo(1)); + HighlightField field1 = highlightFieldMap.get("field1"); + assertThat(field1.fragments().length, equalTo(4)); + assertThat( + field1.fragments()[0].string(), + equalTo("This sentence contains three sentence occurrences (sentence).") + ); + assertThat( + field1.fragments()[1].string(), + equalTo( + "This sentence contains one match, not that short. " + + "This sentence contains two sentence matches." + ) + ); + assertThat( + field1.fragments()[2].string(), + equalTo("This is the second value's first sentence. This one contains no matches.") + ); + assertThat( + field1.fragments()[3].string(), + equalTo("One sentence match here and scored lower since the text is quite long, not that appealing.") + ); + }); } public void testPostingsHighlighterEscapeHtml() throws Exception { @@ -2544,26 +2646,26 @@ public void testPostingsHighlighterEscapeHtml() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < 5; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch"); } indexRandom(true, indexRequestBuilders); - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("title", "test")) - .highlighter(new HighlightBuilder().field("title").encoder("html")) - .get(); - - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - searchResponse, - i, - "title", - 0, - 1, - equalTo("This is a html escaping highlighting test for *&? elasticsearch") - ); - } + assertResponse( + prepareSearch().setQuery(matchQuery("title", "test")).highlighter(new HighlightBuilder().field("title").encoder("html")), + response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a html escaping highlighting test for *&? elasticsearch") + ); + } + } + ); } public void testPostingsHighlighterMultiMapperWithStore() throws Exception { @@ -2592,35 +2694,39 @@ public void testPostingsHighlighterMultiMapperWithStore() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test . Second sentence.").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test . Second sentence.").get(); refresh(); // simple search on body with standard analyzer with a simple field query - SearchResponse searchResponse = prepareSearch() - // lets make sure we analyze the query and we highlight the resulting terms - .setQuery(matchQuery("title", "This is a Test")) - .highlighter(new HighlightBuilder().field("title")) - .get(); - - assertHitCount(searchResponse, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - // stopwords are not highlighted since not indexed - assertHighlight(hit, "title", 0, 1, equalTo("this is a test . 
Second sentence.")); - + assertResponse( + prepareSearch() + // lets make sure we analyze the query and we highlight the resulting terms + .setQuery(matchQuery("title", "This is a Test")) + .highlighter(new HighlightBuilder().field("title")), + response -> { + + assertHitCount(response, 1L); + SearchHit hit = response.getHits().getAt(0); + // stopwords are not highlighted since not indexed + assertHighlight(hit, "title", 0, 1, equalTo("this is a test . Second sentence.")); + } + ); // search on title.key and highlight on title - searchResponse = prepareSearch().setQuery(matchQuery("title.key", "this is a test")) - .highlighter(new HighlightBuilder().field("title.key")) - .get(); - assertHitCount(searchResponse, 1L); + assertResponse( + prepareSearch().setQuery(matchQuery("title.key", "this is a test")).highlighter(new HighlightBuilder().field("title.key")), + response -> { + assertHitCount(response, 1L); - // stopwords are now highlighted since we used only whitespace analyzer here - assertHighlight( - searchResponse, - 0, - "title.key", - 0, - 1, - equalTo("this is a test . Second sentence.") + // stopwords are now highlighted since we used only whitespace analyzer here + assertHighlight( + response, + 0, + "title.key", + 0, + 1, + equalTo("this is a test . Second sentence.") + ); + } ); } @@ -2651,7 +2757,7 @@ public void testPostingsHighlighterMultiMapperFromSource() throws Exception { ); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); + prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); // simple search on body with standard analyzer with a simple field query @@ -2695,8 +2801,7 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5]; for (int i = 0; i < indexRequestBuilders.length; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("title", "This is a test for the postings highlighter"); } indexRandom(true, indexRequestBuilders); @@ -2709,9 +2814,12 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { public void testPostingsHighlighterBoostingQuery() throws IOException, ExecutionException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." + ).get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2750,9 +2858,12 @@ public void testPostingsHighlighterPrefixQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." 
+ ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2771,9 +2882,12 @@ public void testPostingsHighlighterFuzzyQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." + ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2792,9 +2906,12 @@ public void testPostingsHighlighterRegexpQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." + ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2813,9 +2930,12 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." + ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2830,24 +2950,18 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { ); source = searchSource().query(wildcardQuery("field2", "qu*k")).highlighter(highlight().field("field2")); - SearchResponse searchResponse = prepareSearch("test").setSource(source).get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch("test").setSource(source), response -> { + assertHitCount(response, 1L); - assertHighlight( - searchResponse, - 0, - "field2", - 0, - 1, - equalTo("The quick brown fox jumps over the lazy dog! Second sentence.") - ); + assertHighlight(response, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog! Second sentence.")); + }); } public void testPostingsHighlighterTermRangeQuery() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "this is a test", "field2", "aaab").get(); + prepareIndex("test").setSource("field1", "this is a test", "field2", "aaab").get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2860,9 +2974,12 @@ public void testPostingsHighlighterQueryString() throws Exception { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") - .get(); + prepareIndex("test").setSource( + "field1", + "this is a test", + "field2", + "The quick brown fox jumps over the lazy dog! Second sentence." 
+ ).get(); refresh(); logger.info("--> highlighting and searching on field2"); @@ -2882,7 +2999,7 @@ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); + prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2902,7 +3019,7 @@ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Excepti assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); + prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2925,7 +3042,7 @@ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Except assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); + prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2946,7 +3063,7 @@ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Excep assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); - client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); + prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); logger.info("--> highlighting and searching on field1"); @@ -2976,8 +3093,7 @@ public void testPostingsHighlighterManyDocs() throws Exception { // (https://github.com/elastic/elasticsearch/issues/4103) String prefix = randomAlphaOfLengthBetween(5, 30); prefixes.put(String.valueOf(i), prefix); - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource("field1", "Sentence " + prefix + " test. Sentence two."); } logger.info("--> indexing docs"); @@ -2987,13 +3103,14 @@ public void testPostingsHighlighterManyDocs() throws Exception { SearchRequestBuilder searchRequestBuilder = prepareSearch().setSize(COUNT) .setQuery(termQuery("field1", "test")) .highlighter(new HighlightBuilder().field("field1")); - SearchResponse searchResponse = searchRequestBuilder.get(); - assertHitCount(searchResponse, COUNT); - assertThat(searchResponse.getHits().getHits().length, equalTo(COUNT)); - for (SearchHit hit : searchResponse.getHits()) { - String prefix = prefixes.get(hit.getId()); - assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " test. Sentence two.")); - } + assertResponse(searchRequestBuilder, response -> { + assertHitCount(response, COUNT); + assertThat(response.getHits().getHits().length, equalTo(COUNT)); + for (SearchHit hit : response.getHits()) { + String prefix = prefixes.get(hit.getId()); + assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " test. 
Sentence two.")); + } + }); } public void testDoesNotHighlightTypeName() throws Exception { @@ -3012,7 +3129,7 @@ public void testDoesNotHighlightTypeName() throws Exception { assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setSource("foo", "test typename")); + indexRandom(true, prepareIndex("test").setSource("foo", "test typename")); for (String highlighter : ALL_TYPES) { assertHighlight( @@ -3044,7 +3161,7 @@ public void testDoesNotHighlightAliasFilters() throws Exception { assertAcked(indicesAdmin().prepareAliases().addAlias("test", "filtered_alias", matchQuery("foo", "japanese"))); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setSource("foo", "test japanese")); + indexRandom(true, prepareIndex("test").setSource("foo", "test japanese")); for (String highlighter : ALL_TYPES) { assertHighlight( @@ -3174,8 +3291,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "Arbitrary text field which will should not cause a failure").endObject()) .get(); refresh(); @@ -3186,12 +3302,15 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti .setCorners(61.10078883158897, -170.15625, -64.92354174306496, 118.47656249999999) ) .should(QueryBuilders.termQuery("text", "failure")); - SearchResponse search = prepareSearch().setSource( - new SearchSourceBuilder().query(query).highlighter(new HighlightBuilder().field("*").highlighterType(highlighterType)) - ).get(); - assertNoFailures(search); - assertThat(search.getHits().getTotalHits().value, equalTo(1L)); - assertThat(search.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); + assertNoFailuresAndResponse( + prepareSearch().setSource( + new SearchSourceBuilder().query(query).highlighter(new HighlightBuilder().field("*").highlighterType(highlighterType)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); + } + ); } public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException { @@ -3213,8 +3332,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException assertAcked(prepareCreate("test").setMapping(mappings)); ensureYellow(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("jd", "some आवश्यकता है- आर्य समाज अनाथालय, 68 सिविल लाइन्स, बरेली को एक पुरूष" + " रस text") @@ -3253,20 +3371,18 @@ public void testKeywordFieldHighlighting() throws IOException { mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()) - .get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()).get(); refresh(); - SearchResponse search = prepareSearch().setSource( - new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) - .highlighter(new HighlightBuilder().field("*")) - ).get(); - assertNoFailures(search); - assertThat(search.getHits().getTotalHits().value, equalTo(1L)); - 
assertThat( - search.getHits().getAt(0).getHighlightFields().get("keyword_field").getFragments()[0].string(), - equalTo("some text") + assertNoFailuresAndResponse( + prepareSearch().setSource( + new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) + .highlighter(new HighlightBuilder().field("*")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + HighlightField highlightField = response.getHits().getAt(0).getHighlightFields().get("keyword_field"); + assertThat(highlightField.fragments()[0].string(), equalTo("some text")); + } ); } @@ -3282,20 +3398,20 @@ public void testCopyToFields() throws Exception { b.endObject().endObject(); prepareCreate("test").setMapping(b).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("foo", "how now brown cow").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse response = prepareSearch().setQuery(matchQuery("foo_copy", "brown")) - .highlighter(new HighlightBuilder().field(new Field("foo_copy"))) - .get(); - - assertHitCount(response, 1); - HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_copy"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("how now brown cow")); + assertResponse( + prepareSearch().setQuery(matchQuery("foo_copy", "brown")).highlighter(new HighlightBuilder().field(new Field("foo_copy"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_copy"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("how now brown cow")); + } + ); } public void testACopyFieldWithNestedQuery() throws Exception { @@ -3321,8 +3437,7 @@ public void testACopyFieldWithNestedQuery() throws Exception { ); prepareCreate("test").setMapping(mapping).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startArray("foo") @@ -3338,35 +3453,39 @@ public void testACopyFieldWithNestedQuery() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo_text"); - assertThat(field.getFragments().length, equalTo(2)); - assertThat(field.getFragments()[0].string(), equalTo("brown")); - assertThat(field.getFragments()[1].string(), equalTo("cow")); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo_text"); + assertThat(field.fragments().length, equalTo(2)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + assertThat(field.fragments()[1].string(), equalTo("cow")); + } + ); } public void testFunctionScoreQueryHighlight() throws Exception { - client().prepareIndex("test") - 
.setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "brown").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch().setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) - .highlighter(new HighlightBuilder().field(new Field("text"))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown")); + assertResponse( + prepareSearch().setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) + .highlighter(new HighlightBuilder().field(new Field("text"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + } + ); } public void testFiltersFunctionScoreQueryHighlight() throws Exception { - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "brown").field("enable", "yes").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); @@ -3375,16 +3494,20 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { new RandomScoreFunctionBuilder() ); - SearchResponse searchResponse = prepareSearch().setQuery( - new FunctionScoreQueryBuilder( - QueryBuilders.prefixQuery("text", "bro"), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { filterBuilder } - ) - ).highlighter(new HighlightBuilder().field(new Field("text"))).get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown")); + assertResponse( + prepareSearch().setQuery( + new FunctionScoreQueryBuilder( + QueryBuilders.prefixQuery("text", "bro"), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { filterBuilder } + ) + ).highlighter(new HighlightBuilder().field(new Field("text"))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + } + ); } public void testHighlightQueryRewriteDatesWithNow() throws Exception { @@ -3397,25 +3520,26 @@ public void testHighlightQueryRewriteDatesWithNow() throws Exception { DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time"); indexRandom( true, - client().prepareIndex("index-1").setId("1").setSource("d", formatter.format(now), "field", "hello world"), - client().prepareIndex("index-1").setId("2").setSource("d", formatter.format(now.minusDays(1)), "field", "hello"), - client().prepareIndex("index-1").setId("3").setSource("d", formatter.format(now.minusDays(2)), "field", "world") + prepareIndex("index-1").setId("1").setSource("d", formatter.format(now), "field", "hello world"), + prepareIndex("index-1").setId("2").setSource("d", formatter.format(now.minusDays(1)), "field", "hello"), + prepareIndex("index-1").setId("3").setSource("d", formatter.format(now.minusDays(2)), "field", "world") ); ensureSearchable("index-1"); for 
(int i = 0; i < 5; i++) { - final SearchResponse r1 = prepareSearch("index-1").addSort("d", SortOrder.DESC) - .setTrackScores(true) - .highlighter(highlight().field("field").preTags("").postTags("")) - .setQuery( - QueryBuilders.boolQuery() - .must(QueryBuilders.rangeQuery("d").gte("now-12h").lte("now").includeLower(true).includeUpper(true).boost(1.0f)) - .should(QueryBuilders.termQuery("field", "hello")) - ) - .get(); - - assertNoFailures(r1); - assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); - assertHighlight(r1, 0, "field", 0, 1, equalTo("hello world")); + assertNoFailuresAndResponse( + prepareSearch("index-1").addSort("d", SortOrder.DESC) + .setTrackScores(true) + .highlighter(highlight().field("field").preTags("").postTags("")) + .setQuery( + QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("d").gte("now-12h").lte("now").includeLower(true).includeUpper(true).boost(1.0f)) + .should(QueryBuilders.termQuery("field", "hello")) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertHighlight(response, 0, "field", 0, 1, equalTo("hello world")); + } + ); } } @@ -3441,8 +3565,7 @@ public void testWithNestedQuery() throws Exception { ); prepareCreate("test").setMapping(mapping).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startArray("foo") @@ -3460,51 +3583,63 @@ public void testWithNestedQuery() throws Exception { .get(); for (String type : new String[] { "unified", "plain" }) { - SearchResponse searchResponse = prepareSearch().setQuery( - nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None) - ).highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))).get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.getFragments().length, equalTo(2)); - assertThat(field.getFragments()[0].string(), equalTo("brown shoes")); - assertThat(field.getFragments()[1].string(), equalTo("cow")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown shoes")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", matchPhraseQuery("foo.text", "brown shoes"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown shoes")); - - searchResponse = prepareSearch().setQuery(nestedQuery("foo", matchPhrasePrefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) - .get(); - assertHitCount(searchResponse, 1); - field = searchResponse.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown shoes")); + assertResponse( + 
prepareSearch().setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(2)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + assertThat(field.fragments()[1].string(), equalTo("cow")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchPhraseQuery("foo.text", "brown shoes"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", matchPhrasePrefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + } + ); } // For unified and fvh highlighters we just check that the nested query is correctly extracted // but we highlight the root text field since nested documents cannot be highlighted with postings nor term vectors // directly. 
for (String type : ALL_TYPES) { - SearchResponse searchResponse = prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("text").highlighterType(type).requireFieldMatch(false))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("text"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("brown")); + assertResponse( + prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("text").highlighterType(type).requireFieldMatch(false))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown")); + } + ); } } @@ -3514,20 +3649,19 @@ public void testWithNormalizer() throws Exception { assertAcked(prepareCreate("test").setSettings(builder.build()).setMapping("keyword", "type=keyword,normalizer=my_normalizer")); ensureGreen(); - client().prepareIndex("test") - .setId("0") - .setSource("keyword", "Hello World") - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + prepareIndex("test").setId("0").setSource("keyword", "Hello World").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); for (String highlighterType : new String[] { "unified", "plain" }) { - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("keyword", "hello world")) - .highlighter(new HighlightBuilder().field(new Field("keyword").highlighterType(highlighterType))) - .get(); - assertHitCount(searchResponse, 1); - HighlightField field = searchResponse.getHits().getAt(0).getHighlightFields().get("keyword"); - assertThat(field.getFragments().length, equalTo(1)); - assertThat(field.getFragments()[0].string(), equalTo("hello world")); + assertResponse( + prepareSearch().setQuery(matchQuery("keyword", "hello world")) + .highlighter(new HighlightBuilder().field(new Field("keyword").highlighterType(highlighterType))), + response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("keyword"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("hello world")); + } + ); } } @@ -3535,18 +3669,20 @@ public void testDisableHighlightIdField() throws Exception { assertAcked(prepareCreate("test").setMapping("keyword", "type=keyword")); ensureGreen(); - client().prepareIndex("test") - .setId("d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1") + prepareIndex("test").setId("d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1") .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); for (String highlighterType : new String[] { "plain", "unified" }) { - SearchResponse searchResponse = prepareSearch().setQuery( - matchQuery("_id", "d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1") - ).highlighter(new HighlightBuilder().field(new Field("*").highlighterType(highlighterType).requireFieldMatch(false))).get(); - assertHitCount(searchResponse, 1); - assertNull(searchResponse.getHits().getAt(0).getHighlightFields().get("_id")); + assertResponse( + prepareSearch().setQuery(matchQuery("_id", 
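+                    // highlighting on _id is disabled, so the assertNull below expects no "_id" entry in the highlight fields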
"d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1")) + .highlighter(new HighlightBuilder().field(new Field("*").highlighterType(highlighterType).requireFieldMatch(false))), + response -> { + assertHitCount(response, 1); + assertNull(response.getHits().getAt(0).getHighlightFields().get("_id")); + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 480556b942ac8..02867e0cf6920 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -317,10 +317,10 @@ public void testWithIndexFilter() throws InterruptedException { assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")); List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("index-1").setSource("timestamp", "2015-07-08")); - reqs.add(client().prepareIndex("index-1").setSource("timestamp", "2018-07-08")); - reqs.add(client().prepareIndex("index-2").setSource("timestamp", "2019-10-12")); - reqs.add(client().prepareIndex("index-2").setSource("timestamp", "2020-07-08")); + reqs.add(prepareIndex("index-1").setSource("timestamp", "2015-07-08")); + reqs.add(prepareIndex("index-1").setSource("timestamp", "2018-07-08")); + reqs.add(prepareIndex("index-2").setSource("timestamp", "2019-10-12")); + reqs.add(prepareIndex("index-2").setSource("timestamp", "2020-07-08")); indexRandom(true, reqs); FieldCapabilitiesResponse response = client().prepareFieldCaps("index-*").setFields("*").get(); @@ -446,13 +446,13 @@ private void populateTimeRangeIndices() throws Exception { .setMapping("timestamp", "type=date", "field1", "type=long") ); List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("log-index-1").setSource("timestamp", "2015-07-08")); - reqs.add(client().prepareIndex("log-index-1").setSource("timestamp", "2018-07-08")); - reqs.add(client().prepareIndex("log-index-1").setSource("timestamp", "2020-03-03")); - reqs.add(client().prepareIndex("log-index-1").setSource("timestamp", "2020-09-09")); - reqs.add(client().prepareIndex("log-index-2").setSource("timestamp", "2019-10-12")); - reqs.add(client().prepareIndex("log-index-2").setSource("timestamp", "2020-02-02")); - reqs.add(client().prepareIndex("log-index-2").setSource("timestamp", "2020-10-10")); + reqs.add(prepareIndex("log-index-1").setSource("timestamp", "2015-07-08")); + reqs.add(prepareIndex("log-index-1").setSource("timestamp", "2018-07-08")); + reqs.add(prepareIndex("log-index-1").setSource("timestamp", "2020-03-03")); + reqs.add(prepareIndex("log-index-1").setSource("timestamp", "2020-09-09")); + reqs.add(prepareIndex("log-index-2").setSource("timestamp", "2019-10-12")); + reqs.add(prepareIndex("log-index-2").setSource("timestamp", "2020-02-02")); + reqs.add(prepareIndex("log-index-2").setSource("timestamp", "2020-10-10")); indexRandom(true, reqs); ensureGreen("log-index-1", "log-index-2"); indicesAdmin().prepareRefresh("log-index-1", "log-index-2").get(); @@ -549,8 +549,7 @@ private void moveOrCloseShardsOnNodes(String nodeName) throws Exception { assertNotNull(toNode); clusterAdmin().prepareReroute() .add(new MoveAllocationCommand(shardId.getIndexName(), shardId.id(), fromNode.getId(), toNode.getId())) - .execute() - .actionGet(); + .get(); } } } @@ -638,7 +637,7 @@ public void 
testManyIndicesWithSameMapping() { ensureGreen(indices); assertAcked(indicesAdmin().preparePutMapping(indicesWithExtraField).setSource("extra_field", "type=integer").get()); for (String index : indicesWithExtraField) { - client().prepareIndex(index).setSource("extra_field", randomIntBetween(1, 1000)).get(); + prepareIndex(index).setSource("extra_field", randomIntBetween(1, 1000)).get(); } FieldCapabilitiesResponse resp = client().execute(FieldCapabilitiesAction.INSTANCE, request).actionGet(); verifyResponse.accept(resp); @@ -664,7 +663,7 @@ public void testCancel() throws Exception { ) ); BlockingOnRewriteQueryBuilder.blockOnRewrite(); - PlainActionFuture<Response> future = PlainActionFuture.newFuture(); + PlainActionFuture<Response> future = new PlainActionFuture<>(); Request restRequest = new Request("POST", "/_field_caps?fields=*"); restRequest.setEntity(new StringEntity(""" { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index e3c9558eba907..9ad6363d0e57d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -61,8 +60,10 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertCheckedResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -180,8 +181,7 @@ public void testStoredFields() throws Exception { indicesAdmin().preparePutMapping().setSource(mapping, XContentType.JSON).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().field("field1", "value1").field("field2", "value2").field("field3", "value3").endObject() ) @@ -189,70 +189,70 @@ indicesAdmin().prepareRefresh().get(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field1").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - +
assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field1"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + }); // field2 is not stored, check that it is not extracted from source. - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field2").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(0)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field2"), nullValue()); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addStoredField("*3") - .addStoredField("field1") - .addStoredField("field2") - .get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("*").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - 
assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap(), nullValue()); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("*").addStoredField("_source").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getSourceAsMap(), notNullValue()); - assertThat(searchResponse.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field2"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)); + assertThat(response.getHits().getAt(0).getFields().get("field2"), nullValue()); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field3"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*3"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").addStoredField("field1").addStoredField("field2"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + } + ); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field*"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3"), response -> { + 
assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap(), nullValue()); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*").addStoredField("_source"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap(), notNullValue()); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); + }); } public void testScriptDocAndFields() throws Exception { @@ -274,22 +274,19 @@ public void testScriptDocAndFields() throws Exception { indicesAdmin().preparePutMapping().setSource(mapping, XContentType.JSON).get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).field("date", "1970-01-01T00:00:00").endObject() ) .get(); indicesAdmin().prepareFlush().get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).field("date", "1970-01-01T00:00:25").endObject() ) .get(); indicesAdmin().prepareFlush().get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).field("date", "1970-01-01T00:02:00").endObject() ) @@ -297,64 +294,68 @@ public void testScriptDocAndFields() throws Exception { indicesAdmin().refresh(new RefreshRequest()).actionGet(); logger.info("running doc['num1'].value"); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .addScriptField( - "sNum1_field", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields['num1'].value", Collections.emptyMap()) - ) - .addScriptField( - "date1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) - ) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertFalse(response.getHits().getAt(0).hasSource()); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, 
equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(0).getFields().get("sNum1_field").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getFields().get("sNum1_field").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(25000L)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - assertThat(response.getHits().getAt(2).getFields().get("sNum1_field").getValues().get(0), equalTo(3.0)); - assertThat(response.getHits().getAt(2).getFields().get("date1").getValues().get(0), equalTo(120000L)); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ) + .addScriptField( + "sNum1_field", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields['num1'].value", Collections.emptyMap()) + ) + .addScriptField( + "date1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertFalse(response.getHits().getAt(0).hasSource()); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(0).getFields().get("sNum1_field").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getFields().get("sNum1_field").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(25000L)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("sNum1", "sNum1_field", "date1"))); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + 
assertThat(response.getHits().getAt(2).getFields().get("sNum1_field").getValues().get(0), equalTo(3.0)); + assertThat(response.getHits().getAt(2).getFields().get("date1").getValues().get(0), equalTo(120000L)); + } + ); logger.info("running doc['num1'].value * factor"); - response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .addScriptField( - "sNum1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value * factor", Map.of("factor", 2.0)) - ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(4.0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat(fields, equalTo(singleton("sNum1"))); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(6.0)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value * factor", Map.of("factor", 2.0)) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(4.0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(singleton("sNum1"))); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(6.0)); + } + ); } public void testScriptFieldWithNanos() throws Exception { @@ -378,38 +379,36 @@ public void testScriptFieldWithNanos() throws Exception { indexRandom( true, false, - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("date", "1970-01-01T00:00:00.000Z").endObject()), - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("date", date).endObject()) + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("date", "1970-01-01T00:00:00.000Z").endObject()), + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("date", date).endObject()) ); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("date", SortOrder.ASC) - .addScriptField( - "date1", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, 
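+                        // CustomScriptPlugin resolves this inline source to a mock script that returns the doc's date value in epoch millis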
"doc['date'].date.millis", Collections.emptyMap()) - ) - .addScriptField( - "date2", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.nanos", Collections.emptyMap()) - ) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getAt(0).getId(), is("1")); - assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(0).getFields().get("date2").getValues().get(0), equalTo(0L)); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(0L)); - - assertThat(response.getHits().getAt(1).getId(), is("2")); - Instant instant = ZonedDateTime.parse(date).toInstant(); - long dateAsNanos = DateUtils.toLong(instant); - long dateAsMillis = instant.toEpochMilli(); - assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(dateAsMillis)); - assertThat(response.getHits().getAt(1).getFields().get("date2").getValues().get(0), equalTo(dateAsNanos)); - assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo(dateAsNanos)); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("date", SortOrder.ASC) + .addScriptField( + "date1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.millis", Collections.emptyMap()) + ) + .addScriptField( + "date2", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['date'].date.nanos", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getAt(0).getId(), is("1")); + assertThat(response.getHits().getAt(0).getFields().get("date1").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(0).getFields().get("date2").getValues().get(0), equalTo(0L)); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(0L)); + + assertThat(response.getHits().getAt(1).getId(), is("2")); + Instant instant = ZonedDateTime.parse(date).toInstant(); + long dateAsNanos = DateUtils.toLong(instant); + long dateAsMillis = instant.toEpochMilli(); + assertThat(response.getHits().getAt(1).getFields().get("date1").getValues().get(0), equalTo(dateAsMillis)); + assertThat(response.getHits().getAt(1).getFields().get("date2").getValues().get(0), equalTo(dateAsNanos)); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo(dateAsNanos)); + } + ); } public void testIdBasedScriptFields() throws Exception { @@ -418,34 +417,32 @@ public void testIdBasedScriptFields() throws Exception { int numDocs = randomIntBetween(1, 30); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = client().prepareIndex("test") - .setId(Integer.toString(i)) + indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)) .setSource(jsonBuilder().startObject().field("num1", i).endObject()); } indexRandom(true, indexRequestBuilders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addSort("num1", SortOrder.ASC) - .setSize(numDocs) - .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())) - .get(); - - assertNoFailures(response); - - assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); - for (int i = 0; i < numDocs; i++) { - assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - Set fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet()); - assertThat(fields, equalTo(singleton("id"))); - 
assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i))); - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort("num1", SortOrder.ASC) + .setSize(numDocs) + .addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap())), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); + for (int i = 0; i < numDocs; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + Set fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet()); + assertThat(fields, equalTo(singleton("id"))); + assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i))); + } + } + ); } public void testScriptFieldUsingSource() throws Exception { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startObject("obj1") @@ -467,64 +464,69 @@ public void testScriptFieldUsingSource() throws Exception { .get(); indicesAdmin().refresh(new RefreshRequest()).actionGet(); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addScriptField("s_obj1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1", Collections.emptyMap())) - .addScriptField( - "s_obj1_test", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1.test", Collections.emptyMap()) - ) - .addScriptField("s_obj2", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2", Collections.emptyMap())) - .addScriptField( - "s_obj2_arr2", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2.arr2", Collections.emptyMap()) - ) - .addScriptField("s_arr3", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.arr3", Collections.emptyMap())) - .get(); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addScriptField("s_obj1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1", Collections.emptyMap())) + .addScriptField( + "s_obj1_test", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj1.test", Collections.emptyMap()) + ) + .addScriptField("s_obj2", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2", Collections.emptyMap())) + .addScriptField( + "s_obj2_arr2", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.obj2.arr2", Collections.emptyMap()) + ) + .addScriptField("s_arr3", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.arr3", Collections.emptyMap())), + response -> { - assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0)); + assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0)); - assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); + assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); - Map sObj1 = response.getHits().getAt(0).field("s_obj1").getValue(); - assertThat(sObj1.get("test").toString(), equalTo("something")); - assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); + Map sObj1 = response.getHits().getAt(0).field("s_obj1").getValue(); + assertThat(sObj1.get("test").toString(), equalTo("something")); + 
assertThat(response.getHits().getAt(0).field("s_obj1_test").getValue().toString(), equalTo("something")); - Map sObj2 = response.getHits().getAt(0).field("s_obj2").getValue(); - List sObj2Arr2 = (List) sObj2.get("arr2"); - assertThat(sObj2Arr2.size(), equalTo(2)); - assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); - assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); + Map sObj2 = response.getHits().getAt(0).field("s_obj2").getValue(); + List sObj2Arr2 = (List) sObj2.get("arr2"); + assertThat(sObj2Arr2.size(), equalTo(2)); + assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); + assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); - sObj2Arr2 = response.getHits().getAt(0).field("s_obj2_arr2").getValues(); - assertThat(sObj2Arr2.size(), equalTo(2)); - assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); - assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); + sObj2Arr2 = response.getHits().getAt(0).field("s_obj2_arr2").getValues(); + assertThat(sObj2Arr2.size(), equalTo(2)); + assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1")); + assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2")); - List sObj2Arr3 = response.getHits().getAt(0).field("s_arr3").getValues(); - assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1")); + List sObj2Arr3 = response.getHits().getAt(0).field("s_arr3").getValues(); + assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1")); + } + ); } public void testScriptFieldsForNullReturn() throws Exception { - client().prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy("true").get(); - - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .addScriptField("test_script_1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap())) - .get(); - - assertNoFailures(response); - - DocumentField fieldObj = response.getHits().getAt(0).field("test_script_1"); - assertThat(fieldObj, notNullValue()); - List fieldValues = fieldObj.getValues(); - assertThat(fieldValues, hasSize(1)); - assertThat(fieldValues.get(0), nullValue()); + prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy("true").get(); + + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addScriptField( + "test_script_1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap()) + ), + response -> { + DocumentField fieldObj = response.getHits().getAt(0).field("test_script_1"); + assertThat(fieldObj, notNullValue()); + List fieldValues = fieldObj.getValues(); + assertThat(fieldValues, hasSize(1)); + assertThat(fieldValues.get(0), nullValue()); + } + ); } public void testPartialFields() throws Exception { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( XContentFactory.jsonBuilder() .startObject() @@ -605,8 +607,7 @@ public void testStoredFieldsWithoutSource() throws Exception { indicesAdmin().preparePutMapping().setSource(mapping, XContentType.JSON).get(); ZonedDateTime date = ZonedDateTime.of(2012, 3, 22, 0, 0, 0, 0, ZoneOffset.UTC); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("byte_field", (byte) 1) @@ -624,64 +625,65 @@ public void testStoredFieldsWithoutSource() throws Exception { indicesAdmin().prepareRefresh().get(); - SearchResponse searchResponse = 
prepareSearch().setQuery(matchAllQuery()) - .addStoredField("byte_field") - .addStoredField("short_field") - .addStoredField("integer_field") - .addStoredField("long_field") - .addStoredField("float_field") - .addStoredField("double_field") - .addStoredField("date_field") - .addStoredField("boolean_field") - .addStoredField("binary_field") - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - Set fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - "long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "binary_field" - ) - ) + assertCheckedResponse( + prepareSearch().setQuery(matchAllQuery()) + .addStoredField("byte_field") + .addStoredField("short_field") + .addStoredField("integer_field") + .addStoredField("long_field") + .addStoredField("float_field") + .addStoredField("double_field") + .addStoredField("date_field") + .addStoredField("boolean_field") + .addStoredField("binary_field"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "binary_field" + ) + ) + ); + + SearchHit searchHit = response.getHits().getAt(0); + assertThat(searchHit.getFields().get("byte_field").getValue().toString(), equalTo("1")); + assertThat(searchHit.getFields().get("short_field").getValue().toString(), equalTo("2")); + assertThat(searchHit.getFields().get("integer_field").getValue(), equalTo((Object) 3)); + assertThat(searchHit.getFields().get("long_field").getValue(), equalTo((Object) 4L)); + assertThat(searchHit.getFields().get("float_field").getValue(), equalTo((Object) 5.0f)); + assertThat(searchHit.getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + String dateTime = DateFormatter.forPattern("date_optional_time").format(date); + assertThat(searchHit.getFields().get("date_field").getValue(), equalTo((Object) dateTime)); + assertThat(searchHit.getFields().get("boolean_field").getValue(), equalTo((Object) Boolean.TRUE)); + assertThat(searchHit.getFields().get("binary_field").getValue(), equalTo(new BytesArray("testing text".getBytes("UTF8")))); + } ); - - SearchHit searchHit = searchResponse.getHits().getAt(0); - assertThat(searchHit.getFields().get("byte_field").getValue().toString(), equalTo("1")); - assertThat(searchHit.getFields().get("short_field").getValue().toString(), equalTo("2")); - assertThat(searchHit.getFields().get("integer_field").getValue(), equalTo((Object) 3)); - assertThat(searchHit.getFields().get("long_field").getValue(), equalTo((Object) 4L)); - assertThat(searchHit.getFields().get("float_field").getValue(), equalTo((Object) 5.0f)); - assertThat(searchHit.getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); - String dateTime = DateFormatter.forPattern("date_optional_time").format(date); - assertThat(searchHit.getFields().get("date_field").getValue(), equalTo((Object) dateTime)); - assertThat(searchHit.getFields().get("boolean_field").getValue(), equalTo((Object) Boolean.TRUE)); - 
assertThat(searchHit.getFields().get("binary_field").getValue(), equalTo(new BytesArray("testing text".getBytes("UTF8")))); } public void testSearchFieldsMetadata() throws Exception { - client().prepareIndex("my-index") - .setId("1") + prepareIndex("my-index").setId("1") .setRouting("1") .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); - SearchResponse searchResponse = prepareSearch("my-index").addStoredField("field1").addStoredField("_routing").get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).field("field1"), nullValue()); - assertThat(searchResponse.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1")); + assertResponse(prepareSearch("my-index").addStoredField("field1").addStoredField("_routing"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).field("field1"), nullValue()); + assertThat(response.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1")); + }); } public void testGetFieldsComplexField() throws Exception { @@ -741,28 +743,31 @@ public void testGetFieldsComplexField() throws Exception { .endObject() ); - client().prepareIndex("my-index").setId("1").setRefreshPolicy(IMMEDIATE).setSource(source, XContentType.JSON).get(); + prepareIndex("my-index").setId("1").setRefreshPolicy(IMMEDIATE).setSource(source, XContentType.JSON).get(); String field = "field1.field2.field3.field4"; - SearchResponse searchResponse = prepareSearch("my-index").addStoredField(field).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2")); + assertResponse(prepareSearch("my-index").addStoredField(field), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2")); + }); } // see #8203 public void testSingleValueFieldDatatField() throws ExecutionException, InterruptedException { assertAcked(indicesAdmin().prepareCreate("test").setMapping("test_field", "type=keyword").get()); - indexRandom(true, client().prepareIndex("test").setId("1").setSource("test_field", "foobar")); + indexRandom(true, prepareIndex("test").setId("1").setSource("test_field", "foobar")); refresh(); - SearchResponse searchResponse = prepareSearch("test").setSource( - new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).docValueField("test_field") - ).get(); - assertHitCount(searchResponse, 1); - Map<String, DocumentField> fields = searchResponse.getHits().getHits()[0].getFields(); - assertThat(fields.get("test_field").getValue(), equalTo("foobar")); + assertResponse( + prepareSearch("test").setSource(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).docValueField("test_field")), + response -> { + assertHitCount(response, 1); + Map<String, DocumentField> fields = response.getHits().getHits()[0].getFields(); + assertThat(fields.get("test_field").getValue(), 
equalTo("foobar")); + } + ); } public void testDocValueFields() throws Exception { @@ -822,8 +827,7 @@ public void testDocValueFields() throws Exception { indicesAdmin().preparePutMapping().setSource(mapping, XContentType.JSON).get(); ZonedDateTime date = ZonedDateTime.of(2012, 3, 22, 0, 0, 0, 0, ZoneOffset.UTC); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("text_field", "foo") @@ -860,116 +864,116 @@ public void testDocValueFields() throws Exception { if (randomBoolean()) { builder.addDocValueField("*_field"); } - SearchResponse searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - Set fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - "long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "text_field", - "keyword_field", - "binary_field", - "ip_field" + assertResponse(builder, response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "text_field", + "keyword_field", + "binary_field", + "ip_field" + ) ) - ) - ); + ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("date_optional_time").format(date)) - ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); - - builder = prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"); - searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - 
"long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "text_field", - "keyword_field", - "binary_field", - "ip_field" + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); + assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); + assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("date_optional_time").format(date)) + ); + assertThat(response.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); + assertThat(response.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); + assertThat(response.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet( + "byte_field", + "short_field", + "integer_field", + "long_field", + "float_field", + "double_field", + "date_field", + "boolean_field", + "text_field", + "keyword_field", + "binary_field", + "ip_field" + ) ) - ) - ); - - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("date_optional_time").format(date)) - ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); - - 
builder = prepareSearch().setQuery(matchAllQuery()) - .addDocValueField("byte_field", "#.0") - .addDocValueField("short_field", "#.0") - .addDocValueField("integer_field", "#.0") - .addDocValueField("long_field", "#.0") - .addDocValueField("float_field", "#.0") - .addDocValueField("double_field", "#.0") - .addDocValueField("date_field", "epoch_millis"); - searchResponse = builder.get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field")) - ); + ); - assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of("1.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of("2.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of("3.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of("4.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of("5.0"))); - assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of("6.0"))); - assertThat( - searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("epoch_millis").format(date)) + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); + assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); + assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("date_optional_time").format(date)) + ); + assertThat(response.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); + assertThat(response.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); + assertThat(response.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); + assertThat(response.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); + }); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("byte_field", "#.0") + .addDocValueField("short_field", "#.0") + .addDocValueField("integer_field", "#.0") + .addDocValueField("long_field", "#.0") + .addDocValueField("float_field", "#.0") + .addDocValueField("double_field", "#.0") + .addDocValueField("date_field", "epoch_millis"), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getHits().length, equalTo(1)); + Set fields = new 
HashSet<>(response.getHits().getAt(0).getFields().keySet()); + assertThat( + fields, + equalTo( + newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field") + ) + ); + assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of("1.0"))); + assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of("2.0"))); + assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of("3.0"))); + assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of("4.0"))); + assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of("5.0"))); + assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of("6.0"))); + assertThat( + response.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("epoch_millis").format(date)) + ); + } ); } @@ -994,8 +998,7 @@ public void testScriptFields() throws Exception { List reqs = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { reqs.add( - client().prepareIndex("index") - .setId(Integer.toString(i)) + prepareIndex("index").setId(Integer.toString(i)) .setSource( "s", Integer.toString(i), @@ -1021,18 +1024,18 @@ public void testScriptFields() throws Exception { new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + field + "']", Collections.emptyMap()) ); } - SearchResponse resp = req.get(); - assertNoFailures(resp); - for (SearchHit hit : resp.getHits().getHits()) { - final int id = Integer.parseInt(hit.getId()); - Map fields = hit.getFields(); - assertThat(fields.get("s").getValues(), equalTo(Collections.singletonList(Integer.toString(id)))); - assertThat(fields.get("l").getValues(), equalTo(Collections.singletonList((long) id))); - assertThat(fields.get("d").getValues(), equalTo(Collections.singletonList((double) id))); - assertThat(fields.get("ms").getValues(), equalTo(Arrays.asList(Integer.toString(id), Integer.toString(id + 1)))); - assertThat(fields.get("ml").getValues(), equalTo(Arrays.asList((long) id, id + 1L))); - assertThat(fields.get("md").getValues(), equalTo(Arrays.asList((double) id, id + 1d))); - } + assertNoFailuresAndResponse(req, response -> { + for (SearchHit hit : response.getHits().getHits()) { + final int id = Integer.parseInt(hit.getId()); + Map fields = hit.getFields(); + assertThat(fields.get("s").getValues(), equalTo(Collections.singletonList(Integer.toString(id)))); + assertThat(fields.get("l").getValues(), equalTo(Collections.singletonList((long) id))); + assertThat(fields.get("d").getValues(), equalTo(Collections.singletonList((double) id))); + assertThat(fields.get("ms").getValues(), equalTo(Arrays.asList(Integer.toString(id), Integer.toString(id + 1)))); + assertThat(fields.get("ml").getValues(), equalTo(Arrays.asList((long) id, id + 1L))); + assertThat(fields.get("md").getValues(), equalTo(Arrays.asList((double) id, id + 1d))); + } + }); } public void testDocValueFieldsWithFieldAlias() throws Exception { @@ -1071,30 +1074,31 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "text_field", "foo", "date_field", formatter.format(date)); refresh("test"); - SearchRequestBuilder builder = prepareSearch().setQuery(matchAllQuery()) - .addDocValueField("text_field_alias") - .addDocValueField("date_field_alias") - .addDocValueField("date_field"); - SearchResponse 
searchResponse = builder.get(); - - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - SearchHit hit = searchResponse.getHits().getAt(0); - - Map fields = hit.getFields(); - assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); - - DocumentField textFieldAlias = fields.get("text_field_alias"); - assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); - assertThat(textFieldAlias.getValue(), equalTo("foo")); - - DocumentField dateFieldAlias = fields.get("date_field_alias"); - assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); - assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - - DocumentField dateField = fields.get("date_field"); - assertThat(dateField.getName(), equalTo("date_field")); - assertThat(dateField.getValue(), equalTo("1990-12-29")); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("text_field_alias") + .addDocValueField("date_field_alias") + .addDocValueField("date_field"), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); + + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); + + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); + + DocumentField dateField = fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + assertThat(dateField.getValue(), equalTo("1990-12-29")); + } + ); } public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { @@ -1133,27 +1137,28 @@ public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "text_field", "foo", "date_field", formatter.format(date)); refresh("test"); - SearchRequestBuilder builder = prepareSearch().setQuery(matchAllQuery()).addDocValueField("*alias").addDocValueField("date_field"); - SearchResponse searchResponse = builder.get(); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addDocValueField("*alias").addDocValueField("date_field"), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().getAt(0); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - SearchHit hit = searchResponse.getHits().getAt(0); + Map fields = hit.getFields(); + assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); - Map fields = hit.getFields(); - assertThat(fields.keySet(), equalTo(newHashSet("text_field_alias", "date_field_alias", "date_field"))); + DocumentField textFieldAlias = fields.get("text_field_alias"); + assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); + assertThat(textFieldAlias.getValue(), equalTo("foo")); - DocumentField textFieldAlias = fields.get("text_field_alias"); - assertThat(textFieldAlias.getName(), equalTo("text_field_alias")); - assertThat(textFieldAlias.getValue(), equalTo("foo")); + DocumentField dateFieldAlias = fields.get("date_field_alias"); + assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); + assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - DocumentField 
dateFieldAlias = fields.get("date_field_alias"); - assertThat(dateFieldAlias.getName(), equalTo("date_field_alias")); - assertThat(dateFieldAlias.getValue(), equalTo("1990-12-29")); - - DocumentField dateField = fields.get("date_field"); - assertThat(dateField.getName(), equalTo("date_field")); - assertThat(dateField.getValue(), equalTo("1990-12-29")); + DocumentField dateField = fields.get("date_field"); + assertThat(dateField.getName(), equalTo("date_field")); + assertThat(dateField.getValue(), equalTo("1990-12-29")); + } + ); } public void testStoredFieldsWithFieldAlias() throws Exception { @@ -1185,18 +1190,19 @@ public void testStoredFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "field1", "value1", "field2", "value2"); refresh("test"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addStoredField("field1-alias") - .addStoredField("field2-alias") - .get(); - assertHitCount(searchResponse, 1L); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addStoredField("field1-alias").addStoredField("field2-alias"), + response -> { + assertHitCount(response, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertEquals(1, hit.getFields().size()); - assertTrue(hit.getFields().containsKey("field1-alias")); + SearchHit hit = response.getHits().getAt(0); + assertEquals(1, hit.getFields().size()); + assertTrue(hit.getFields().containsKey("field1-alias")); - DocumentField field = hit.getFields().get("field1-alias"); - assertThat(field.getValue().toString(), equalTo("value1")); + DocumentField field = hit.getFields().get("field1-alias"); + assertThat(field.getValue().toString(), equalTo("value1")); + } + ); } public void testWildcardStoredFieldsWithFieldAlias() throws Exception { @@ -1228,19 +1234,20 @@ public void testWildcardStoredFieldsWithFieldAlias() throws Exception { indexDoc("test", "1", "field1", "value1", "field2", "value2"); refresh("test"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field*"), response -> { + assertHitCount(response, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertEquals(2, hit.getFields().size()); - assertTrue(hit.getFields().containsKey("field1")); - assertTrue(hit.getFields().containsKey("field1-alias")); + SearchHit hit = response.getHits().getAt(0); + assertEquals(2, hit.getFields().size()); + assertTrue(hit.getFields().containsKey("field1")); + assertTrue(hit.getFields().containsKey("field1-alias")); - DocumentField field = hit.getFields().get("field1"); - assertThat(field.getValue().toString(), equalTo("value1")); + DocumentField field = hit.getFields().get("field1"); + assertThat(field.getValue().toString(), equalTo("value1")); - DocumentField fieldAlias = hit.getFields().get("field1-alias"); - assertThat(fieldAlias.getValue().toString(), equalTo("value1")); + DocumentField fieldAlias = hit.getFields().get("field1-alias"); + assertThat(fieldAlias.getValue().toString(), equalTo("value1")); + }); } public void testLoadMetadata() throws Exception { @@ -1248,20 +1255,17 @@ public void testLoadMetadata() throws Exception { indexRandom( true, - client().prepareIndex("test") - .setId("1") - .setRouting("1") - .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) + prepareIndex("test").setId("1").setRouting("1").setSource(jsonBuilder().startObject().field("field1", "value").endObject()) ); 
- SearchResponse response = prepareSearch("test").addStoredField("field1").get(); - assertNoFailures(response); - assertHitCount(response, 1); + assertNoFailuresAndResponse(prepareSearch("test").addStoredField("field1"), response -> { + assertHitCount(response, 1); - Map fields = response.getHits().getAt(0).getMetadataFields(); + Map fields = response.getHits().getAt(0).getMetadataFields(); - assertThat(fields.get("field1"), nullValue()); - assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); - assertThat(response.getHits().getAt(0).getDocumentFields().size(), equalTo(0)); + assertThat(fields.get("field1"), nullValue()); + assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); + assertThat(response.getHits().getAt(0).getDocumentFields().size(), equalTo(0)); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index faefeea0cb04e..eff2e8d3653c5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -8,12 +8,10 @@ package org.elasticsearch.search.functionscore; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoPoint; @@ -48,8 +46,10 @@ import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.linearDecayFunction; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; @@ -87,9 +87,7 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { List indexBuilders = new ArrayList<>(); indexBuilders.add( - client().prepareIndex() - .setId("1") - .setIndex("test") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -101,9 +99,7 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { ) ); indexBuilders.add( - client().prepareIndex() - .setId("2") - .setIndex("test") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -118,9 +114,7 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { int numDummyDocs = 20; for (int i = 1; i <= numDummyDocs; i++) { indexBuilders.add( - client().prepareIndex() - 
.setId(Integer.toString(i + 3)) - .setIndex("test") + prepareIndex("test").setId(Integer.toString(i + 3)) .setSource( jsonBuilder().startObject() .field("test", "value") @@ -140,61 +134,65 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { lonlat.add(20f); lonlat.add(11f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); // Test Exp - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); // Test Lin - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + assertHitCount( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) + ), + (numDummyDocs + 2) ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - response = client().search( - new SearchRequest(new String[] 
{}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km")))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km")))) + ), + response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); } public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception { @@ -218,25 +216,17 @@ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception { // add two docs within offset List<IndexRequestBuilder> indexBuilders = new ArrayList<>(); indexBuilders.add( - client().prepareIndex() - .setId("1") - .setIndex("test") - .setSource(jsonBuilder().startObject().field("test", "value").field("num", 0.5).endObject()) + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("test", "value").field("num", 0.5).endObject()) ); indexBuilders.add( - client().prepareIndex() - .setId("2") - .setIndex("test") - .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.7).endObject()) + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.7).endObject()) ); // add docs outside offset int numDummyDocs = 20; for (int i = 0; i < numDummyDocs; i++) { indexBuilders.add( - client().prepareIndex() - .setId(Integer.toString(i + 3)) - .setIndex("test") + prepareIndex("test").setId(Integer.toString(i + 3)) .setSource(jsonBuilder().startObject().field("test", "value").field("num", 3.0 + i).endObject()) ); } @@ -245,67 +235,76 @@ // Test Gauss - ActionFuture<SearchResponse> response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + for (int i = 0; i < numDummyDocs; i++) { + assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); + } + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), 
equalTo(sh.getAt(0).getScore())); - for (int i = 0; i < numDummyDocs; i++) { - assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); - } // Test Exp - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + for (int i = 0; i < numDummyDocs; i++) { + assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); + } + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); - for (int i = 0; i < numDummyDocs; i++) { - assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); - } // Test Lin - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0, 1.0)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) ) - ) - ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); } public void testBoostModeSettingWorks() throws Exception { @@ -330,9 +329,7 @@ public void testBoostModeSettingWorks() throws Exception { List indexBuilders = new ArrayList<>(); indexBuilders.add( - client().prepareIndex() - .setId("1") - .setIndex("test") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("test", "value value") @@ -344,9 +341,7 @@ public void testBoostModeSettingWorks() throws Exception { ) ); indexBuilders.add( - client().prepareIndex() - .setId("2") - 
.setIndex("test") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -364,48 +359,56 @@ public void testBoostModeSettingWorks() throws Exception { lonlat.add(20f); lonlat.add(11f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( - CombineFunction.MULTIPLY + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( + CombineFunction.MULTIPLY + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - // Test Exp - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(termQuery("test", "value"))) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().query(termQuery("test", "value"))) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( - CombineFunction.REPLACE + + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( + CombineFunction.REPLACE + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); } @@ -427,9 +430,7 @@ public void testParseGeoPoint() throws Exception { ) ); - client().prepareIndex() - .setId("1") - .setIndex("test") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -446,34 +447,44 @@ public void testParseGeoPoint() throws Exception { ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10)) ); GeoPoint point = new GeoPoint(20, 11); - ActionFuture response = client().search( - new SearchRequest(new String[] 
{}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", point, "1000km")).boostMode(CombineFunction.REPLACE) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", point, "1000km")).boostMode( + CombineFunction.REPLACE + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); // this is equivalent to new GeoPoint(20, 11); just flipped so scores must be same float[] coords = { 11, 20 }; - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", coords, "1000km")).boostMode(CombineFunction.REPLACE) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", coords, "1000km")).boostMode( + CombineFunction.REPLACE + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0f, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0f, 1.e-5)); } public void testCombineModes() throws Exception { @@ -494,9 +505,7 @@ public void testCombineModes() throws Exception { ) ); - client().prepareIndex() - .setId("1") - .setIndex("test") + prepareIndex("test").setId("1") .setRefreshPolicy(IMMEDIATE) .setSource(jsonBuilder().startObject().field("test", "value value").field("num", 1.0).endObject()) .get(); @@ -505,95 +514,120 @@ public void testCombineModes() throws Exception { ScoreFunctionBuilders.weightFactorFunction(2) ); // decay score should return 0.5 for this function and baseQuery should return 2.0f as it's score - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.MULTIPLY + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MULTIPLY + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); + } ); - SearchResponse sr = response.actionGet(); - SearchHits sh = 
sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.REPLACE + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.REPLACE + ) ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.SUM) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.SUM + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(2.0 + 0.5, 1.e-5)); + logger.info( + "--> Hit[0] {} Explanation:\n {}", + response.getHits().getAt(0).getId(), + response.getHits().getAt(0).getExplanation() + ); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(2.0 + 0.5, 1.e-5)); - logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).getId(), sr.getHits().getAt(0).getExplanation()); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.AVG) + + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.AVG + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo((2.0 + 0.5) / 2, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo((2.0 + 0.5) / 2, 1.e-5)); - - 
response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MIN) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MIN + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(0.5, 1.e-5)); - - response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MAX) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MAX + ) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(2.0, 1.e-5)); + } ); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(2.0, 1.e-5)); - } public void testExceptionThrownIfScaleLE0() throws Exception { @@ -623,18 +657,18 @@ public void testExceptionThrownIfScaleLE0() throws Exception { ).actionGet(); refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d"))) - ) + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d")) + ) + ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testParseDateMath() throws Exception { @@ -670,24 +704,23 @@ public void testParseDateMath() throws Exception { ).actionGet(); refresh(); - SearchResponse sr = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))) - ) - ).get(); - - assertNoFailures(sr); - assertOrderedSearchHits(sr, "1", "2"); - - sr = client().search( - new SearchRequest(new String[] {}).source( - 
searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))) - ) - ).get(); - - assertNoFailures(sr); - assertOrderedSearchHits(sr, "2", "1"); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))) + ) + ), + response -> assertOrderedSearchHits(response, "1", "2") + ); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))) + ) + ), + response -> assertOrderedSearchHits(response, "2", "1") + ); } public void testValueMissingLin() throws Exception { @@ -729,32 +762,31 @@ public void testValueMissingLin() throws Exception { refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery( - baseQuery, - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("num1", "2013-05-28", "+3d")), - new FilterFunctionBuilder(linearDecayFunction("num2", "0.0", "1")) } - ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery( + baseQuery, + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("num1", "2013-05-28", "+3d")), + new FilterFunctionBuilder(linearDecayFunction("num2", "0.0", "1")) } + ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(4)); + double[] scores = new double[4]; + for (int i = 0; i < sh.getHits().length; i++) { + scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); + } + assertThat(scores[0], lessThan(scores[1])); + assertThat(scores[2], lessThan(scores[3])); + } ); - - SearchResponse sr = response.actionGet(); - - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(4)); - double[] scores = new double[4]; - for (int i = 0; i < sh.getHits().length; i++) { - scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); - } - assertThat(scores[0], lessThan(scores[1])); - assertThat(scores[2], lessThan(scores[3])); - } public void testDateWithoutOrigin() throws Exception { @@ -810,32 +842,32 @@ public void testDateWithoutOrigin() throws Exception { refresh(); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery( - QueryBuilders.matchAllQuery(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("num1", null, "7000d")), - new FilterFunctionBuilder(gaussDecayFunction("num1", null, "1d")), - new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "7000d")) } - ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery( + QueryBuilders.matchAllQuery(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("num1", null, "7000d")), + new 
FilterFunctionBuilder(gaussDecayFunction("num1", null, "1d")), + new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "7000d")) } + ).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(3)); + double[] scores = new double[4]; + for (int i = 0; i < sh.getHits().length; i++) { + scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); + } + assertThat(scores[1], lessThan(scores[0])); + assertThat(scores[2], lessThan(scores[1])); + } ); - - SearchResponse sr = response.actionGet(); - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(3)); - double[] scores = new double[4]; - for (int i = 0; i < sh.getHits().length; i++) { - scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore(); - } - assertThat(scores[1], lessThan(scores[0])); - assertThat(scores[2], lessThan(scores[1])); - } public void testManyDocsLin() throws Exception { @@ -871,9 +903,7 @@ public void testManyDocsLin() throws Exception { String date = "2013-05-" + dayString; indexBuilders.add( - client().prepareIndex() - .setId(Integer.toString(i)) - .setIndex("test") + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("test", "value") @@ -891,33 +921,34 @@ public void testManyDocsLin() throws Exception { List lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDocs) - .query( - functionScoreQuery( - termQuery("test", "value"), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(linearDecayFunction("date", "2013-05-30", "+15d")), - new FilterFunctionBuilder(linearDecayFunction("geo", lonlat, "1000km")), - new FilterFunctionBuilder(linearDecayFunction("num", numDocs, numDocs / 2.0)) } - ).scoreMode(ScoreMode.MULTIPLY).boostMode(CombineFunction.REPLACE) - ) - ) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDocs) + .query( + functionScoreQuery( + termQuery("test", "value"), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(linearDecayFunction("date", "2013-05-30", "+15d")), + new FilterFunctionBuilder(linearDecayFunction("geo", lonlat, "1000km")), + new FilterFunctionBuilder(linearDecayFunction("num", numDocs, numDocs / 2.0)) } + ).scoreMode(ScoreMode.MULTIPLY).boostMode(CombineFunction.REPLACE) + ) + ) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(numDocs)); + double[] scores = new double[numDocs]; + for (int i = 0; i < numDocs; i++) { + scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore(); + } + for (int i = 0; i < numDocs - 1; i++) { + assertThat(scores[i], lessThan(scores[i + 1])); + } + } ); - - SearchResponse sr = response.actionGet(); - assertNoFailures(sr); - SearchHits sh = sr.getHits(); - assertThat(sh.getHits().length, equalTo(numDocs)); - double[] scores = new double[numDocs]; - for (int i = 0; i < numDocs; i++) { - scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore(); - } - for (int i = 0; i < numDocs - 1; i++) { - assertThat(scores[i], lessThan(scores[i + 1])); - } } public void testParsingExceptionIfFieldDoesNotExist() throws Exception { @@ -953,23 +984,22 @@ public void 
testParsingExceptionIfFieldDoesNotExist() throws Exception { List lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDocs) - .query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("type.geo", lonlat, "1000km")).scoreMode( - FunctionScoreQuery.ScoreMode.MULTIPLY + + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().size(numDocs) + .query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("type.geo", lonlat, "1000km")).scoreMode( + FunctionScoreQuery.ScoreMode.MULTIPLY + ) ) - ) - ) + ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { @@ -996,20 +1026,20 @@ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { ).actionGet(); refresh(); // so, we indexed a string field, but now we try to score a num field - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 0.5)).scoreMode(ScoreMode.MULTIPLY) + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 0.5)).scoreMode( + ScoreMode.MULTIPLY + ) + ) ) - ) + ).actionGet() ); - try { - response.actionGet(); - fail("Expected SearchPhaseExecutionException"); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), is("all shards failed")); - } + assertThat(e.getMessage(), is("all shards failed")); } public void testNoQueryGiven() throws Exception { @@ -1033,15 +1063,17 @@ public void testNoQueryGiven() throws Exception { .actionGet(); refresh(); // no query given, only a function: the decay function alone determines the score - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(linearDecayFunction("num", 1, 0.5)).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + assertResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().query( + functionScoreQuery(linearDecayFunction("num", 1, 0.5)).scoreMode(FunctionScoreQuery.ScoreMode.MULTIPLY) + ) ) - ) + ), + response -> {} ); - response.actionGet(); } public void testMultiFieldOptions() throws Exception { @@ -1066,9 +1098,7 @@ public void testMultiFieldOptions() throws Exception { ); // Index for testing MIN and MAX - IndexRequestBuilder doc1 = client().prepareIndex() - .setId("1") - .setIndex("test") + IndexRequestBuilder doc1 = prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -1084,9 +1114,7 @@ 
.endArray() .endObject() ); - IndexRequestBuilder doc2 = client().prepareIndex() - .setId("2") - .setIndex("test") + IndexRequestBuilder doc2 = prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("test", "value") @@ -1099,80 +1127,87 @@ public void testMultiFieldOptions() throws Exception { indexRandom(true, doc1, doc2); - ActionFuture response = client().search(new SearchRequest(new String[] {}).source(searchSource().query(baseQuery))); - SearchResponse sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - SearchHits sh = sr.getHits(); - assertThat(sh.getTotalHits().value, equalTo((long) (2))); + assertResponse(client().search(new SearchRequest(new String[] {}).source(searchSource().query(baseQuery))), response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value, equalTo((long) (2))); + }); List lonlat = new ArrayList<>(); lonlat.add(20f); lonlat.add(10f); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); // Now test AVG and SUM - doc1 = client().prepareIndex() - .setId("1") - .setIndex("test") + doc1 = prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject().field("test", "value").startArray("num").value(0.0).value(1.0).value(2.0).endArray().endObject() ); - doc2 = client().prepareIndex() - .setId("2") - .setIndex("test") - .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()); + doc2 = prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()); indexRandom(true, doc1, doc2); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + 
searchSource().query( + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + + assertThat(sh.getAt(0).getId(), equalTo("2")); + assertThat(sh.getAt(1).getId(), equalTo("1")); + assertThat(1.0 - sh.getAt(0).getScore(), closeTo((1.0 - sh.getAt(1).getScore()) / 3.0, 1.e-6d)); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - - assertThat(sh.getAt(0).getId(), equalTo("2")); - assertThat(sh.getAt(1).getId(), equalTo("1")); - assertThat(1.0 - sh.getAt(0).getScore(), closeTo((1.0 - sh.getAt(1).getScore()) / 3.0, 1.e-6d)); - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG)) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG)) + ) ) - ) + ), + response -> { + assertSearchHits(response, "1", "2"); + SearchHits sh = response.getHits(); + assertThat((double) (sh.getAt(0).getScore()), closeTo((sh.getAt(1).getScore()), 1.e-6d)); + } ); - sr = response.actionGet(); - assertSearchHits(sr, "1", "2"); - sh = sr.getHits(); - assertThat((double) (sh.getAt(0).getScore()), closeTo((sh.getAt(1).getScore()), 1.e-6d)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index e9ce09f7455a2..ee60888d7a0a8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.settings.Settings; @@ -41,12 +40,13 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -112,40 +112,41 @@ protected Collection> nodePlugins() { return Arrays.asList(ExplainableScriptPlugin.class); } - public void testExplainScript() throws InterruptedException, IOException { + public void testExplainScript() throws InterruptedException, IOException, ExecutionException { List indexRequests = new 
ArrayList<>(); for (int i = 0; i < 20; i++) { indexRequests.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource(jsonBuilder().startObject().field("number_field", i).field("text", "text").endObject()) ); } indexRandom(true, true, indexRequests); client().admin().indices().prepareRefresh().get(); ensureYellow(); - SearchResponse response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().explain(true) - .query( - functionScoreQuery( - termQuery("text", "text"), - scriptFunction(new Script(ScriptType.INLINE, "test", "explainable_script", Collections.emptyMap())) - ).boostMode(CombineFunction.REPLACE) - ) - ) - ).actionGet(); - - assertNoFailures(response); - SearchHits hits = response.getHits(); - assertThat(hits.getTotalHits().value, equalTo(20L)); - int idCounter = 19; - for (SearchHit hit : hits.getHits()) { - assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); - assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); - assertThat(hit.getExplanation().getDetails().length, equalTo(2)); - idCounter--; - } + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source( + searchSource().explain(true) + .query( + functionScoreQuery( + termQuery("text", "text"), + scriptFunction(new Script(ScriptType.INLINE, "test", "explainable_script", Collections.emptyMap())) + ).boostMode(CombineFunction.REPLACE) + ) + ) + ), + response -> { + SearchHits hits = response.getHits(); + assertThat(hits.getTotalHits().value, equalTo(20L)); + int idCounter = 19; + for (SearchHit hit : hits.getHits()) { + assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); + assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); + assertThat(hit.getExplanation().getDetails().length, equalTo(2)); + idCounter--; + } + } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java index 61cccfdf114b1..0a43255967dcd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -9,9 +9,9 @@ package org.elasticsearch.search.functionscore; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.search.function.FieldValueFactorFunction; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; @@ -20,8 +20,8 @@ import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static 
org.elasticsearch.xcontent.XContentFactory.jsonBuilder; /** @@ -46,9 +46,9 @@ public void testFieldValueFactor() throws IOException { ) ); - client().prepareIndex("test").setId("1").setSource("test", 5, "body", "foo").get(); - client().prepareIndex("test").setId("2").setSource("test", 17, "body", "foo").get(); - client().prepareIndex("test").setId("3").setSource("body", "bar").get(); + prepareIndex("test").setId("1").setSource("test", 5, "body", "foo").get(); + prepareIndex("test").setId("2").setSource("test", 17, "body", "foo").get(); + prepareIndex("test").setId("3").setSource("body", "bar").get(); refresh(); @@ -88,10 +88,11 @@ public void testFieldValueFactor() throws IOException { // doc 3 doesn't have a "test" field, so an exception will be thrown try { - SearchResponse response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("test"))) - .get(); - assertFailures(response); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("test"))), + ElasticsearchAssertions::assertFailures + ); } catch (SearchPhaseExecutionException e) { // We are expecting an exception, because 3 has no field } @@ -111,30 +112,32 @@ public void testFieldValueFactor() throws IOException { ); // field is not mapped but we're defaulting it to 100 so all documents should have the same score - SearchResponse response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery( - functionScoreQuery( - matchAllQuery(), - fieldValueFactorFunction("notmapped").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100) - ) - ) - .get(); - assertEquals(response.getHits().getAt(0).getScore(), response.getHits().getAt(2).getScore(), 0); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery( + functionScoreQuery( + matchAllQuery(), + fieldValueFactorFunction("notmapped").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100) + ) + ), + response -> assertEquals(response.getHits().getAt(0).getScore(), response.getHits().getAt(2).getScore(), 0) + ); - client().prepareIndex("test").setId("2").setSource("test", -1, "body", "foo").get(); + prepareIndex("test").setId("2").setSource("test", -1, "body", "foo").get(); refresh(); // -1 divided by 0 is infinity, which should provoke an exception. 
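// The failure can surface on either path: thrown locally as a SearchPhaseExecutionException, or collected as per-shard failures in an otherwise successful response. The try/catch around the assertFailures check below is kept to cover both outcomes.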
try { - response = prepareSearch("test").setExplain(randomBoolean()) - .setQuery( - functionScoreQuery( - simpleQueryStringQuery("foo"), - fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).factor(0) - ) - ) - .get(); - assertFailures(response); + assertResponse( + prepareSearch("test").setExplain(randomBoolean()) + .setQuery( + functionScoreQuery( + simpleQueryStringQuery("foo"), + fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).factor(0) + ) + ), + ElasticsearchAssertions::assertFailures + ); } catch (SearchPhaseExecutionException e) { // This is fine, the query will throw an exception if executed // locally, instead of just having failures diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index e32abeb481a2a..bcecc49c2d463 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -41,6 +41,8 @@ import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -76,7 +78,7 @@ protected Map, Object>> pluginScripts() { } } - public void testScriptScoresNested() throws IOException { + public void testScriptScoresNested() throws Exception { createIndex(INDEX); index(INDEX, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); @@ -84,39 +86,46 @@ public void testScriptScoresNested() throws IOException { Script scriptOne = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1", Collections.emptyMap()); Script scriptTwo = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); - SearchResponse response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - functionScoreQuery(functionScoreQuery(scriptFunction(scriptOne)), scriptFunction(scriptTwo)), - scriptFunction(scriptTwo) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + functionScoreQuery(functionScoreQuery(scriptFunction(scriptOne)), scriptFunction(scriptTwo)), + scriptFunction(scriptTwo) + ) ) ) - ) - ).actionGet(); - assertNoFailures(response); - assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); + ), + response -> assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)) + ); } - public void testScriptScoresWithAgg() throws IOException { + public void testScriptScoresWithAgg() throws Exception { createIndex(INDEX); index(INDEX, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); - SearchResponse response = client().search( - new SearchRequest(new 
String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script))).aggregation(terms("score_agg").script(script)) - ) - ).actionGet(); - assertNoFailures(response); - assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); - assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), equalTo("1.0")); - assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script))).aggregation(terms("score_agg").script(script)) + ) + ), + response -> { + assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); + assertThat( + ((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), + equalTo("1.0") + ); + assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); + } + ); } - public void testMinScoreFunctionScoreBasic() throws IOException { + public void testMinScoreFunctionScoreBasic() throws Exception { float score = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); float minScore = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); index( @@ -130,34 +139,42 @@ public void testMinScoreFunctionScoreBasic() throws IOException { ensureYellow(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['random_score']", Collections.emptyMap()); - SearchResponse searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)) - ) - ).actionGet(); - if (score < minScore) { - assertThat(searchResponse.getHits().getTotalHits().value, is(0L)); - } else { - assertThat(searchResponse.getHits().getTotalHits().value, is(1L)); - } + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)) + ) + ), + response -> { + if (score < minScore) { + assertThat(response.getHits().getTotalHits().value, is(0L)); + } else { + assertThat(response.getHits().getTotalHits().value, is(1L)); + } + } + ); - searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - new MatchAllQueryBuilder(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(scriptFunction(script)), - new FilterFunctionBuilder(scriptFunction(script)) } - ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + new MatchAllQueryBuilder(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(scriptFunction(script)), + new FilterFunctionBuilder(scriptFunction(script)) } + ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + ) ) - ) - ).actionGet(); - if (score < minScore) { - assertThat(searchResponse.getHits().getTotalHits().value, is(0L)); - } else { - assertThat(searchResponse.getHits().getTotalHits().value, is(1L)); - } + ), + response -> { + if (score < minScore) { + assertThat(response.getHits().getTotalHits().value, is(0L)); + } else { + assertThat(response.getHits().getTotalHits().value, is(1L)); + } + } + ); } public void 
testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOException, ExecutionException, InterruptedException { @@ -166,7 +183,7 @@ public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOExcept int scoreOffset = randomIntBetween(0, 2 * numDocs); int minScore = randomIntBetween(0, 2 * numDocs); for (int i = 0; i < numDocs; i++) { - docs.add(client().prepareIndex(INDEX).setId(Integer.toString(i)).setSource("num", i + scoreOffset)); + docs.add(prepareIndex(INDEX).setId(Integer.toString(i)).setSource("num", i + scoreOffset)); } indexRandom(true, docs); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return (doc['num'].value)", Collections.emptyMap()); @@ -178,26 +195,33 @@ public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOExcept numMatchingDocs = numDocs; } - SearchResponse searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)).size(numDocs) - ) - ).actionGet(); - assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs); - - searchResponse = client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - new MatchAllQueryBuilder(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(scriptFunction(script)), - new FilterFunctionBuilder(scriptFunction(script)) } - ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) - ).size(numDocs) - ) - ).actionGet(); - assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs); + final int finalNumMatchingDocs = numMatchingDocs; + + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)).size(numDocs) + ) + ), + response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs) + ); + + assertResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().query( + functionScoreQuery( + new MatchAllQueryBuilder(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(scriptFunction(script)), + new FilterFunctionBuilder(scriptFunction(script)) } + ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + ).size(numDocs) + ) + ), + response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs) + ); + } protected void assertMinScoreSearchResponses(int numDocs, SearchResponse searchResponse, int numMatchingDocs) { @@ -216,35 +240,38 @@ public void testWithEmptyFunctions() throws IOException, ExecutionException, Int index("test", "1", jsonBuilder().startObject().field("text", "test text").endObject()); refresh(); - SearchResponse termQuery = client().search( - new SearchRequest(new String[] {}).source(searchSource().explain(true).query(termQuery("text", "text"))) - ).get(); - assertNoFailures(termQuery); - assertThat(termQuery.getHits().getTotalHits().value, equalTo(1L)); - float termQueryScore = termQuery.getHits().getAt(0).getScore(); - + float[] termQueryScore = new float[1]; + assertNoFailuresAndResponse( + client().search(new SearchRequest(new String[] {}).source(searchSource().explain(true).query(termQuery("text", "text")))), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + termQueryScore[0] = response.getHits().getAt(0).getScore(); + } + ); for (CombineFunction combineFunction : CombineFunction.values()) { - testMinScoreApplied(combineFunction, 
termQueryScore); + testMinScoreApplied(combineFunction, termQueryScore[0]); } } protected void testMinScoreApplied(CombineFunction boostMode, float expectedScore) throws InterruptedException, ExecutionException { - SearchResponse response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(0.1f)) - ) - ).get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getScore(), equalTo(expectedScore)); - - response = client().search( - new SearchRequest(new String[] {}).source( - searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(2f)) - ) - ).get(); - - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(0.1f)) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getScore(), equalTo(expectedScore)); + } + ); + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).source( + searchSource().explain(true).query(functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(2f)) + ) + ), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(0L)) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index 5c9c54a0d3b19..396af7e8501cf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -11,10 +11,8 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; @@ -29,7 +27,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.io.IOException; import java.util.Arrays; @@ -40,6 +37,7 @@ import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -81,19 +79,19 @@ public void testPlugin() throws Exception { client().admin().indices().prepareRefresh().get(); DecayFunctionBuilder gfb = new 
CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d"); - ActionFuture response = client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value"), gfb))) + assertNoFailuresAndResponse( + client().search( + new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) + .source(searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value"), gfb))) + ), + response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getHits().length, equalTo(2)); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + } ); - SearchResponse sr = response.actionGet(); - ElasticsearchAssertions.assertNoFailures(sr); - SearchHits sh = sr.getHits(); - - assertThat(sh.getHits().length, equalTo(2)); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - } public static class CustomDistanceScorePlugin extends Plugin implements SearchPlugin { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 14df03bb86e8d..c608c253c851b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -49,6 +49,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFourthHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -66,35 +68,37 @@ public void testEnforceWindowSize() { // this int iters = scaledRandomIntBetween(10, 20); for (int i = 0; i < iters; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("f", Integer.toString(i)).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("f", Integer.toString(i)).get(); } refresh(); int numShards = getNumShards("test").numPrimaries; for (int j = 0; j < iters; j++) { - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchAllQuery()) - .setRescorer( - new QueryRescorerBuilder( - functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.weightFactorFunction(100)).boostMode( - CombineFunction.REPLACE - ).queryName("hello world") - ).setQueryWeight(0.0f).setRescoreQueryWeight(1.0f), - 1 - ) - .setSize(randomIntBetween(2, 10)) - .get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasScore(100.f)); - int numDocsWith100AsAScore = 0; - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - float score = searchResponse.getHits().getHits()[i].getScore(); - if (score == 100f) { - numDocsWith100AsAScore += 1; + assertNoFailuresAndResponse( + prepareSearch().setQuery(QueryBuilders.matchAllQuery()) + .setRescorer( + new QueryRescorerBuilder( + 
functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.weightFactorFunction(100)).boostMode( + CombineFunction.REPLACE + ).queryName("hello world") + ).setQueryWeight(0.0f).setRescoreQueryWeight(1.0f), + 1 + ) + .setSize(randomIntBetween(2, 10)), + response -> { + assertFirstHit(response, hasScore(100.f)); + int numDocsWith100AsAScore = 0; + for (int i = 0; i < response.getHits().getHits().length; i++) { + float score = response.getHits().getHits()[i].getScore(); + if (score == 100f) { + numDocsWith100AsAScore += 1; + } + } + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + // we cannot assert that they are equal since some shards might not have docs at all + assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards)); } - } - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - // we cannot assert that they are equal since some shards might not have docs at all - assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards)); + ); } } @@ -114,46 +118,47 @@ public void testRescorePhrase() throws Exception { ).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1)) ); - client().prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get(); - client().prepareIndex("test").setId("2").setSource("field1", "the quick lazy huge brown fox jumps over the tree ").get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("1").setSource("field1", "the quick brown fox").get(); + prepareIndex("test").setId("2").setSource("field1", "the quick lazy huge brown fox jumps over the tree ").get(); + prepareIndex("test").setId("3") .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR) - ) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)).setRescoreQueryWeight(2), - 5 - ) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2")); - - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(3)), 5) - .get(); - - assertHitCount(searchResponse, 3); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown")), 5) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); + assertResponse( + 
prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)).setRescoreQueryWeight(2), + 5 + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("2")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(3)), 5), + response -> { + assertHitCount(response, 3); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown")), 5), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + } + ); } public void testMoreDocs() throws Exception { @@ -173,78 +178,77 @@ public void testMoreDocs() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping).setSettings(builder.put("index.number_of_shards", 1))); - client().prepareIndex("test").setId("1").setSource("field1", "massachusetts avenue boston massachusetts").get(); - client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts").get(); - client().prepareIndex("test").setId("3").setSource("field1", "boston avenue lexington massachusetts").get(); + prepareIndex("test").setId("1").setSource("field1", "massachusetts avenue boston massachusetts").get(); + prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts").get(); + prepareIndex("test").setId("3").setSource("field1", "boston avenue lexington massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("4").setSource("field1", "boston road lexington massachusetts").get(); - client().prepareIndex("test").setId("5").setSource("field1", "lexington street lexington massachusetts").get(); - client().prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); - client().prepareIndex("test").setId("7").setSource("field1", "bosten street san franciso california").get(); + prepareIndex("test").setId("4").setSource("field1", "boston road lexington massachusetts").get(); + prepareIndex("test").setId("5").setSource("field1", "lexington street lexington massachusetts").get(); + prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); + prepareIndex("test").setId("7").setSource("field1", "bosten street san franciso california").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("8").setSource("field1", "hollywood boulevard los angeles california").get(); - 
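// Note: the misspellings in this test data ("bosten ... franciso" in doc 7, "massachussetts" in doc 9) appear deliberate; doc 10 repeats doc 9 with the correct spelling, so the two documents must stay distinct.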
client().prepareIndex("test").setId("9").setSource("field1", "1st street boston massachussetts").get(); - client().prepareIndex("test").setId("10").setSource("field1", "1st street boston massachusetts").get(); + prepareIndex("test").setId("8").setSource("field1", "hollywood boulevard los angeles california").get(); + prepareIndex("test").setId("9").setSource("field1", "1st street boston massachussetts").get(); + prepareIndex("test").setId("10").setSource("field1", "1st street boston massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("11").setSource("field1", "2st street boston massachusetts").get(); - client().prepareIndex("test").setId("12").setSource("field1", "3st street boston massachusetts").get(); + prepareIndex("test").setId("11").setSource("field1", "2st street boston massachusetts").get(); + prepareIndex("test").setId("12").setSource("field1", "3st street boston massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - assertHitCount(searchResponse, 9); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(0) - .setSize(5) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - assertHitCount(searchResponse, 9); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("3")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertFirstHit(response, hasId("2")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + } + ); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertThat(response.getHits().getMaxScore(), 
equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("2")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + } + ); // Make sure non-zero from works: - searchResponse = prepareSearch().setQuery( - QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR) - ) - .setFrom(2) - .setSize(5) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) - .setRescoreQueryWeight(2.0f), - 20 - ) - .get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(5)); - assertHitCount(searchResponse, 9); - assertThat(searchResponse.getHits().getMaxScore(), greaterThan(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) + .setFrom(2) + .setSize(5) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) + .setRescoreQueryWeight(2.0f), + 20 + ), + response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertThat(response.getHits().getMaxScore(), greaterThan(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + } + ); } // Tests a rescore window smaller than number of hits: @@ -265,63 +269,66 @@ public void testSmallRescoreWindow() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping).setSettings(builder.put("index.number_of_shards", 1))); - client().prepareIndex("test").setId("3").setSource("field1", "massachusetts").get(); - client().prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); + prepareIndex("test").setId("3").setSource("field1", "massachusetts").get(); + prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get(); - client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); + prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get(); + prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")) - .setFrom(0) - .setSize(5) - .get(); - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("1")); - assertFourthHit(searchResponse, hasId("2")); + assertResponse(prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")).setFrom(0).setSize(5), response -> { + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), 
         assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping).setSettings(builder.put("index.number_of_shards", 1)));
-        client().prepareIndex("test").setId("3").setSource("field1", "massachusetts").get();
-        client().prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get();
+        prepareIndex("test").setId("3").setSource("field1", "massachusetts").get();
+        prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get();
         indicesAdmin().prepareRefresh("test").get();
-        client().prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get();
-        client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get();
+        prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get();
+        prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get();
         indicesAdmin().prepareRefresh("test").get();
-        SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
-            .setFrom(0)
-            .setSize(5)
-            .get();
-        assertThat(searchResponse.getHits().getHits().length, equalTo(4));
-        assertHitCount(searchResponse, 4);
-        assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore()));
-        assertFirstHit(searchResponse, hasId("3"));
-        assertSecondHit(searchResponse, hasId("6"));
-        assertThirdHit(searchResponse, hasId("1"));
-        assertFourthHit(searchResponse, hasId("2"));
+        assertResponse(prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts")).setFrom(0).setSize(5), response -> {
+            assertThat(response.getHits().getHits().length, equalTo(4));
+            assertHitCount(response, 4);
+            assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore()));
+            assertFirstHit(response, hasId("3"));
+            assertSecondHit(response, hasId("6"));
+            assertThirdHit(response, hasId("1"));
+            assertFourthHit(response, hasId("2"));
+        });
         // Now, rescore only top 2 hits w/ proximity:
-        searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
-            .setFrom(0)
-            .setSize(5)
-            .setRescorer(
-                new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f)
-                    .setRescoreQueryWeight(2.0f),
-                2
-            )
-            .get();
-        // Only top 2 hits were re-ordered:
-        assertThat(searchResponse.getHits().getHits().length, equalTo(4));
-        assertHitCount(searchResponse, 4);
-        assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore()));
-        assertFirstHit(searchResponse, hasId("6"));
-        assertSecondHit(searchResponse, hasId("3"));
-        assertThirdHit(searchResponse, hasId("1"));
-        assertFourthHit(searchResponse, hasId("2"));
+        assertResponse(
+            prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
+                .setFrom(0)
+                .setSize(5)
+                .setRescorer(
+                    new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f)
+                        .setRescoreQueryWeight(2.0f),
+                    2
+                ),
+            response -> {
+                // Only top 2 hits were re-ordered:
+                assertThat(response.getHits().getHits().length, equalTo(4));
+                assertHitCount(response, 4);
+                assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore()));
+                assertFirstHit(response, hasId("6"));
+                assertSecondHit(response, hasId("3"));
+                assertThirdHit(response, hasId("1"));
+                assertFourthHit(response, hasId("2"));
+            }
+        );
         // Now, rescore only top 3 hits w/ proximity:
-        searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
-            .setFrom(0)
-            .setSize(5)
-            .setRescorer(
-                new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f)
-                    .setRescoreQueryWeight(2.0f),
-                3
-            )
-            .get();
-
-        // Only top 3 hits were re-ordered:
-        assertThat(searchResponse.getHits().getHits().length, equalTo(4));
-        assertHitCount(searchResponse, 4);
-        assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore()));
-        assertFirstHit(searchResponse, hasId("1"));
-        assertSecondHit(searchResponse, hasId("6"));
-        assertThirdHit(searchResponse, hasId("3"));
-        assertFourthHit(searchResponse, hasId("2"));
+        assertResponse(
+            prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
+                .setFrom(0)
+                .setSize(5)
+                .setRescorer(
+                    new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f)
+                        .setRescoreQueryWeight(2.0f),
+                    3
+                ),
+            response -> {
+                // Only top 3 hits were re-ordered:
+                assertThat(response.getHits().getHits().length, equalTo(4));
+                assertHitCount(response, 4);
+                assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore()));
+                assertFirstHit(response, hasId("1"));
+                assertSecondHit(response, hasId("6"));
+                assertThirdHit(response, hasId("3"));
+                assertFourthHit(response, hasId("2"));
+            }
+        );
     }
 
     // Tests a rescorer that penalizes the scores:
@@ -342,42 +349,44 @@ public void testRescorerMadeScoresWorse() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("test").setMapping(mapping).setSettings(builder.put("index.number_of_shards", 1)));
client().prepareIndex("test").setId("3").setSource("field1", "massachusetts").get(); - client().prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); + prepareIndex("test").setId("3").setSource("field1", "massachusetts").get(); + prepareIndex("test").setId("6").setSource("field1", "massachusetts avenue lexington massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - client().prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get(); - client().prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); + prepareIndex("test").setId("1").setSource("field1", "lexington massachusetts avenue").get(); + prepareIndex("test").setId("2").setSource("field1", "lexington avenue boston massachusetts road").get(); indicesAdmin().prepareRefresh("test").get(); - SearchResponse searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) - .setFrom(0) - .setSize(5) - .get(); - assertThat(searchResponse.getHits().getHits().length, equalTo(4)); - assertHitCount(searchResponse, 4); - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("6")); - assertThirdHit(searchResponse, hasId("1")); - assertFourthHit(searchResponse, hasId("2")); - + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)).setFrom(0).setSize(5), + response -> { + assertThat(response.getHits().getHits().length, equalTo(4)); + assertHitCount(response, 4); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("1")); + assertFourthHit(response, hasId("2")); + } + ); // Now, penalizing rescore (nothing matches the rescore query): - searchResponse = prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) - .setFrom(0) - .setSize(5) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(1.0f) - .setRescoreQueryWeight(-1f), - 3 - ) - .get(); - - // 6 and 1 got worse, and then the hit (2) outside the rescore window were sorted ahead: - assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("6")); - assertFourthHit(searchResponse, hasId("1")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(Operator.OR)) + .setFrom(0) + .setSize(5) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(1.0f) + .setRescoreQueryWeight(-1f), + 3 + ), + response -> { + // 6 and 1 got worse, and then the hit (2) outside the rescore window were sorted ahead: + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("6")); + assertFourthHit(response, hasId("1")); + } + ); } // Comparator that sorts hits and rescored hits in the same 
@@ -430,43 +439,46 @@ public void testEquivalence() throws Exception {
             int rescoreWindow = between(1, 3) * resultSize;
             String intToEnglish = English.intToEnglish(between(0, numDocs - 1));
             String query = intToEnglish.split(" ")[0];
-            SearchResponse rescored = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
-                .setPreference("test") // ensure we hit the same shards for tie-breaking
-                .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
-                .setFrom(0)
-                .setSize(resultSize)
-                .setRescorer(
-                    new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", intToEnglish).slop(3))).setQueryWeight(1.0f)
-                        // no weight - so we basically use the same score as the actual query
-                        .setRescoreQueryWeight(0.0f),
-                    rescoreWindow
-                )
-                .get();
-
-            SearchResponse plain = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
-                .setPreference("test") // ensure we hit the same shards for tie-breaking
-                .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
-                .setFrom(0)
-                .setSize(resultSize)
-                .get();
-
-            // check equivalence
-            assertEquivalent(query, plain, rescored);
-            rescored = prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
-                .setPreference("test") // ensure we hit the same shards for tie-breaking
-                .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
-                .setFrom(0)
-                .setSize(resultSize)
-                .setRescorer(
-                    new QueryRescorerBuilder(constantScoreQuery(matchPhraseQuery("field1", "not in the index").slop(3))).setQueryWeight(
-                        1.0f
-                    ).setRescoreQueryWeight(1.0f),
-                    rescoreWindow
-                )
-                .get();
-            // check equivalence
-            assertEquivalent(query, plain, rescored);
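+            // nesting keeps the plain response in scope while each rescored response is compared against it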
prepareIndex("test").setId("2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").get(); + prepareIndex("test").setId("3") .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); { - SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer( - new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f)).setQueryWeight(0.5f) - .setRescoreQueryWeight(0.4f), - 5 - ) - .setExplain(true) - .get(); - assertHitCount(searchResponse, 3); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - for (int i = 0; i < 3; i++) { - assertThat(searchResponse.getHits().getAt(i).getExplanation(), notNullValue()); - assertThat(searchResponse.getHits().getAt(i).getExplanation().isMatch(), equalTo(true)); - assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails().length, equalTo(2)); - assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails()[0].isMatch(), equalTo(true)); - if (i == 2) { - assertThat(searchResponse.getHits().getAt(i).getExplanation().getDetails()[1].getValue(), equalTo(0.5f)); - } else { - assertThat(searchResponse.getHits().getAt(i).getExplanation().getDescription(), equalTo("sum of:")); - assertThat( - searchResponse.getHits().getAt(i).getExplanation().getDetails()[0].getDetails()[1].getValue(), - equalTo(0.5f) - ); - assertThat( - searchResponse.getHits().getAt(i).getExplanation().getDetails()[1].getDetails()[1].getValue(), - equalTo(0.4f) - ); + assertResponse( + prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setRescorer( + new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f)).setQueryWeight(0.5f) + .setRescoreQueryWeight(0.4f), + 5 + ) + .setExplain(true), + response -> { + assertHitCount(response, 3); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + + for (int i = 0; i < 3; i++) { + assertThat(response.getHits().getAt(i).getExplanation(), notNullValue()); + assertThat(response.getHits().getAt(i).getExplanation().isMatch(), equalTo(true)); + assertThat(response.getHits().getAt(i).getExplanation().getDetails().length, equalTo(2)); + assertThat(response.getHits().getAt(i).getExplanation().getDetails()[0].isMatch(), equalTo(true)); + if (i == 2) { + assertThat(response.getHits().getAt(i).getExplanation().getDetails()[1].getValue(), equalTo(0.5f)); + } else { + assertThat(response.getHits().getAt(i).getExplanation().getDescription(), equalTo("sum of:")); + assertThat( + response.getHits().getAt(i).getExplanation().getDetails()[0].getDetails()[1].getValue(), + equalTo(0.5f) + ); + assertThat( + response.getHits().getAt(i).getExplanation().getDetails()[1].getDetails()[1].getValue(), + equalTo(0.4f) + ); + } + } } - } + ); } String[] scoreModes = new String[] { "max", "min", "avg", "total", "multiply", "" }; @@ -540,21 +554,26 @@ public void testExplain() throws Exception { if ("".equals(scoreModes[innerMode]) == false) { innerRescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreModes[innerMode])); } - - SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - 
-
-            SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-                .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
-                .setRescorer(innerRescoreQuery, 5)
-                .setExplain(true)
-                .get();
-            assertHitCount(searchResponse, 3);
-            assertFirstHit(searchResponse, hasId("1"));
-            assertSecondHit(searchResponse, hasId("2"));
-            assertThirdHit(searchResponse, hasId("3"));
-
-            for (int j = 0; j < 3; j++) {
-                assertThat(searchResponse.getHits().getAt(j).getExplanation().getDescription(), equalTo(descriptionModes[innerMode]));
-            }
-
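+            // the lambda can only capture effectively final locals, so the loop counter is copied first
+            // (the same copy-before-capture pattern recurs in the other converted loops below)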
+            final int finalInnerMode = innerMode;
+            assertResponse(
+                prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+                    .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
+                    .setRescorer(innerRescoreQuery, 5)
+                    .setExplain(true),
+                response -> {
+                    assertHitCount(response, 3);
+                    assertFirstHit(response, hasId("1"));
+                    assertSecondHit(response, hasId("2"));
+                    assertThirdHit(response, hasId("3"));
+
+                    for (int j = 0; j < 3; j++) {
+                        assertThat(
+                            response.getHits().getAt(j).getExplanation().getDescription(),
+                            equalTo(descriptionModes[finalInnerMode])
+                        );
+                    }
+                }
+            );
             for (int outerMode = 0; outerMode < scoreModes.length; outerMode++) {
                 QueryRescorerBuilder outerRescoreQuery = new QueryRescorerBuilder(matchQuery("field1", "the quick brown").boost(4.0f))
                     .setQueryWeight(0.5f)
@@ -563,23 +582,29 @@ public void testExplain() throws Exception {
                 if ("".equals(scoreModes[outerMode]) == false) {
                     outerRescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreModes[outerMode]));
                 }
-
-                searchResponse = prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-                    .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
-                    .addRescorer(innerRescoreQuery, 5)
-                    .addRescorer(outerRescoreQuery.windowSize(10))
-                    .setExplain(true)
-                    .get();
-                assertHitCount(searchResponse, 3);
-                assertFirstHit(searchResponse, hasId("1"));
-                assertSecondHit(searchResponse, hasId("2"));
-                assertThirdHit(searchResponse, hasId("3"));
-
-                for (int j = 0; j < 3; j++) {
-                    Explanation explanation = searchResponse.getHits().getAt(j).getExplanation();
-                    assertThat(explanation.getDescription(), equalTo(descriptionModes[outerMode]));
-                    assertThat(explanation.getDetails()[0].getDetails()[0].getDescription(), equalTo(descriptionModes[innerMode]));
-                }
+                final int finalOuterMode = outerMode;
+                assertResponse(
+                    prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+                        .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR))
+                        .addRescorer(innerRescoreQuery, 5)
+                        .addRescorer(outerRescoreQuery.windowSize(10))
+                        .setExplain(true),
+                    response -> {
+                        assertHitCount(response, 3);
+                        assertFirstHit(response, hasId("1"));
+                        assertSecondHit(response, hasId("2"));
+                        assertThirdHit(response, hasId("3"));
+
+                        for (int j = 0; j < 3; j++) {
+                            Explanation explanation = response.getHits().getAt(j).getExplanation();
+                            assertThat(explanation.getDescription(), equalTo(descriptionModes[finalOuterMode]));
+                            assertThat(
+                                explanation.getDetails()[0].getDetails()[0].getDescription(),
+                                equalTo(descriptionModes[finalInnerMode])
+                            );
+                        }
+                    }
+                );
             }
         }
     }
@@ -617,58 +642,66 @@ public void testScoring() throws Exception {
             if ("".equals(scoreMode) == false) {
                rescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreMode));
            }
-
-            SearchResponse rescored = prepareSearch().setPreference("test") // ensure we hit the same shards for tie-breaking
-                .setFrom(0)
-                .setSize(10)
-                .setQuery(query)
-                .setRescorer(rescoreQuery, 50)
-                .get();
-
-            assertHitCount(rescored, 4);
-
-            assertThat(rescored.getHits().getMaxScore(), equalTo(rescored.getHits().getHits()[0].getScore()));
-            if ("total".equals(scoreMode) || "".equals(scoreMode)) {
-                assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
-                assertSecondHit(rescored, hasId(String.valueOf(i)));
-                assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
-                assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight));
-                assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight));
-                assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
-                assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight));
-            } else if ("max".equals(scoreMode)) {
-                assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
-                assertSecondHit(rescored, hasId(String.valueOf(i)));
-                assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
-                assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight));
-                assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight));
-                assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
-                assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight));
-            } else if ("min".equals(scoreMode)) {
-                assertFirstHit(rescored, hasId(String.valueOf(i + 2)));
-                assertSecondHit(rescored, hasId(String.valueOf(i + 1)));
-                assertThirdHit(rescored, hasId(String.valueOf(i)));
-                assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight));
-                assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight));
-                assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight));
-                assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight));
-            } else if ("avg".equals(scoreMode)) {
-                assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
-                assertSecondHit(rescored, hasId(String.valueOf(i + 2)));
-                assertThirdHit(rescored, hasId(String.valueOf(i)));
-                assertThat(rescored.getHits().getHits()[0].getScore(), equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f));
-                assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight));
-                assertThat(rescored.getHits().getHits()[2].getScore(), equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f));
-                assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f));
-            } else if ("multiply".equals(scoreMode)) {
-                assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
-                assertSecondHit(rescored, hasId(String.valueOf(i)));
-                assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
-                assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight));
-                assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight));
-                assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
-                assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight));
-            }
+            final int finalI = i;
+            assertResponse(
+                prepareSearch().setPreference("test") // ensure we hit the same shards for tie-breaking
+                    .setFrom(0)
+                    .setSize(10)
+                    .setQuery(query)
+                    .setRescorer(rescoreQuery, 50),
+                rescored -> {
+                    assertHitCount(rescored, 4);
+
+                    assertThat(rescored.getHits().getMaxScore(), equalTo(rescored.getHits().getHits()[0].getScore()));
+                    if ("total".equals(scoreMode) || "".equals(scoreMode)) {
+                        assertFirstHit(rescored, hasId(String.valueOf(finalI + 1)));
+                        assertSecondHit(rescored, hasId(String.valueOf(finalI)));
+                        assertThirdHit(rescored, hasId(String.valueOf(finalI + 2)));
+                        assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight));
+                        assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight));
+                        assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+                        assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight));
+                    } else if ("max".equals(scoreMode)) {
+                        assertFirstHit(rescored, hasId(String.valueOf(finalI + 1)));
+                        assertSecondHit(rescored, hasId(String.valueOf(finalI)));
+                        assertThirdHit(rescored, hasId(String.valueOf(finalI + 2)));
+                        assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight));
+                        assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight));
+                        assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+                        assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight));
+                    } else if ("min".equals(scoreMode)) {
+                        assertFirstHit(rescored, hasId(String.valueOf(finalI + 2)));
+                        assertSecondHit(rescored, hasId(String.valueOf(finalI + 1)));
+                        assertThirdHit(rescored, hasId(String.valueOf(finalI)));
+                        assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight));
+                        assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight));
+                        assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight));
+                        assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight));
+                    } else if ("avg".equals(scoreMode)) {
+                        assertFirstHit(rescored, hasId(String.valueOf(finalI + 1)));
+                        assertSecondHit(rescored, hasId(String.valueOf(finalI + 2)));
+                        assertThirdHit(rescored, hasId(String.valueOf(finalI)));
+                        assertThat(
+                            rescored.getHits().getHits()[0].getScore(),
+                            equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f)
+                        );
+                        assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight));
+                        assertThat(
+                            rescored.getHits().getHits()[2].getScore(),
+                            equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f)
+                        );
+                        assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f));
+                    } else if ("multiply".equals(scoreMode)) {
+                        assertFirstHit(rescored, hasId(String.valueOf(finalI + 1)));
+                        assertSecondHit(rescored, hasId(String.valueOf(finalI)));
+                        assertThirdHit(rescored, hasId(String.valueOf(finalI + 2)));
+                        assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight));
+                        assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight));
+                        assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+                        assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight));
+                    }
+                }
+            );
             }
         }
     }
 
@@ -688,13 +721,16 @@ public void testMultipleRescores() throws Exception {
         // First set the rescore window large enough that both rescores take effect
         SearchRequestBuilder request = prepareSearch();
         request.addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, numDocs);
-        SearchResponse response = request.get();
-        assertFirstHit(response, hasId("7"));
-        assertSecondHit(response, hasId("8"));
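+        // the same mutable request builder is re-executed on each assertResponse call, so it can be adjusted between runs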
+        assertResponse(request, response -> {
+            assertFirstHit(response, hasId("7"));
+            assertSecondHit(response, hasId("8"));
+        });
         // Now squash the second rescore window so it never gets to see a seven
-        response = request.setSize(1).clearRescorers().addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, 1).get();
-        assertFirstHit(response, hasId("8"));
+        assertResponse(
+            request.setSize(1).clearRescorers().addRescorer(eightIsGreat, numDocs).addRescorer(sevenIsBetter, 1),
+            response -> assertFirstHit(response, hasId("8"))
+        );
         // We have no idea what the second hit will be because we didn't get a chance to look for seven
 
         // Now use one rescore to drag the number we're looking for into the window of another
@@ -709,11 +745,12 @@ public void testMultipleRescores() throws Exception {
             )
         ).setScoreMode(QueryRescoreMode.Total);
         request.clearRescorers().addRescorer(ninetyIsGood, numDocs).addRescorer(oneToo, 10);
-        response = request.setSize(2).get();
-        assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore()));
-        assertFirstHit(response, hasId("91"));
-        assertFirstHit(response, hasScore(2001.0f));
-        assertSecondHit(response, hasScore(1001.0f)); // Not sure which one it is but it is ninety something
+        assertResponse(request.setSize(2), response -> {
+            assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore()));
+            assertFirstHit(response, hasId("91"));
+            assertFirstHit(response, hasScore(2001.0f));
+            assertSecondHit(response, hasScore(1001.0f)); // Not sure which one it is but it is ninety something
+        });
     }
 
     private int indexRandomNumbers(String analyzer) throws Exception {
@@ -744,7 +781,7 @@ private int indexRandomNumbers(String analyzer, int shards, boolean dummyDocs) t
         int numDocs = randomIntBetween(100, 150);
         IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
         for (int i = 0; i < numDocs; i++) {
-            docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+            docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i));
         }
 
         indexRandom(true, dummyDocs, docs);
@@ -756,7 +793,7 @@ private int indexRandomNumbers(String analyzer, int shards, boolean dummyDocs) t
     public void testFromSize() throws Exception {
         assertAcked(prepareCreate("test").setSettings(indexSettings(1, 0)));
         for (int i = 0; i < 5; i++) {
-            client().prepareIndex("test").setId("" + i).setSource("text", "hello world").get();
+            prepareIndex("test").setId("" + i).setSource("text", "hello world").get();
         }
         refresh();
 
@@ -772,7 +809,7 @@ public void testFromSize() throws Exception {
     public void testRescorePhaseWithInvalidSort() throws Exception {
         assertAcked(prepareCreate("test"));
         for (int i = 0; i < 5; i++) {
-            client().prepareIndex("test").setId("" + i).setSource("number", 0).get();
+            prepareIndex("test").setId("" + i).setSource("number", 0).get();
         }
         refresh();
 
@@ -797,14 +834,17 @@ public void testRescorePhaseWithInvalidSort() throws Exception {
         assertNotNull(exc.getCause());
         assertThat(exc.getCause().getMessage(), containsString("Cannot use [sort] option in conjunction with [rescore]."));
 
-        SearchResponse resp = prepareSearch().addSort(SortBuilders.scoreSort())
-            .setTrackScores(true)
-            .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50)
-            .get();
-        assertThat(resp.getHits().getTotalHits().value, equalTo(5L));
-        assertThat(resp.getHits().getHits().length, equalTo(5));
-        for (SearchHit hit : resp.getHits().getHits()) {
-            assertThat(hit.getScore(), equalTo(101f));
-        }
+        assertResponse(
+            prepareSearch().addSort(SortBuilders.scoreSort())
+                .setTrackScores(true)
+                .addRescorer(new QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(5L));
+                assertThat(response.getHits().getHits().length, equalTo(5));
+                for (SearchHit hit : response.getHits().getHits()) {
+                    assertThat(hit.getScore(), equalTo(101f));
+                }
+            }
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java
index ef8ffcf0d806a..8f178397f508b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.functionscore;
 
 import org.apache.lucene.util.ArrayUtil;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
@@ -37,6 +36,8 @@
 import static org.elasticsearch.script.MockScriptPlugin.NAME;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
@@ -97,35 +98,39 @@ public void testConsistentHitsWithSameSeed() throws Exception {
             preference = randomRealisticUnicodeOfLengthBetween(1, 10);
         }
         int innerIters = scaledRandomIntBetween(2, 5);
-        SearchHit[] hits = null;
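+        // a single-element array acts as a mutable holder: the lambda below cannot reassign a captured local, but it can write to the array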
+        final SearchHit[][] hits = new SearchHit[1][];
         for (int i = 0; i < innerIters; i++) {
-            SearchResponse searchResponse = prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking
-                .setPreference(preference)
-                .setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField("foo")))
-                .get();
-            assertThat(
-                "Failures " + Arrays.toString(searchResponse.getShardFailures()),
-                searchResponse.getShardFailures().length,
-                CoreMatchers.equalTo(0)
-            );
-            final int hitCount = searchResponse.getHits().getHits().length;
-            final SearchHit[] currentHits = searchResponse.getHits().getHits();
-            ArrayUtil.timSort(currentHits, (o1, o2) -> {
-                // for tie-breaking we have to resort here since if the score is
-                // identical we rely on collection order which might change.
-                int cmp = Float.compare(o1.getScore(), o2.getScore());
-                return cmp == 0 ? o1.getId().compareTo(o2.getId()) : cmp;
-            });
-            if (i == 0) {
-                assertThat(hits, nullValue());
-                hits = currentHits;
-            } else {
-                assertThat(hits.length, equalTo(searchResponse.getHits().getHits().length));
-                for (int j = 0; j < hitCount; j++) {
-                    assertThat("" + j, currentHits[j].getScore(), equalTo(hits[j].getScore()));
-                    assertThat("" + j, currentHits[j].getId(), equalTo(hits[j].getId()));
+            final int finalI = i;
+            assertResponse(
+                prepareSearch().setSize(docCount) // get all docs otherwise we are prone to tie-breaking
+                    .setPreference(preference)
+                    .setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField("foo"))),
+                response -> {
+                    assertThat(
+                        "Failures " + Arrays.toString(response.getShardFailures()),
+                        response.getShardFailures().length,
+                        CoreMatchers.equalTo(0)
+                    );
+                    final int hitCount = response.getHits().getHits().length;
+                    final SearchHit[] currentHits = response.getHits().getHits();
+                    ArrayUtil.timSort(currentHits, (o1, o2) -> {
+                        // for tie-breaking we have to resort here since if the score is
+                        // identical we rely on collection order which might change.
+                        int cmp = Float.compare(o1.getScore(), o2.getScore());
+                        return cmp == 0 ? o1.getId().compareTo(o2.getId()) : cmp;
+                    });
+                    if (finalI == 0) {
+                        assertThat(hits[0], nullValue());
+                        hits[0] = currentHits;
+                    } else {
+                        assertThat(hits[0].length, equalTo(response.getHits().getHits().length));
+                        for (int j = 0; j < hitCount; j++) {
+                            assertThat("" + j, currentHits[j].getScore(), equalTo(hits[0][j].getScore()));
+                            assertThat("" + j, currentHits[j].getId(), equalTo(hits[0][j].getId()));
+                        }
+                    }
                 }
-            }
+            );
 
             // randomly change some docs to get them in different segments
             int numDocsToChange = randomIntBetween(20, 50);
@@ -152,8 +157,7 @@ public void testScoreAccessWithinScript() throws Exception {
 
         int docCount = randomIntBetween(100, 200);
         for (int i = 0; i < docCount; i++) {
-            client().prepareIndex("test")
-                .setId("" + i)
+            prepareIndex("test").setId("" + i)
                 // we add 1 to the index field to make sure that the scripts below never compute log(0)
                 .setSource("body", randomFrom(Arrays.asList("foo", "bar", "baz")), "index", i + 1)
                 .get();
@@ -165,73 +169,88 @@ public void testScoreAccessWithinScript() throws Exception {
 
         // Test for accessing _score
         Script script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score))", params);
-        SearchResponse resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        SearchHit firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
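+        // assertNoFailuresAndResponse first checks that no shard failed, then runs the response assertions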
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );
 
         // Test for accessing _score.intValue()
         script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.intValue()))", params);
-        resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );
 
         // Test for accessing _score.longValue()
         script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.longValue()))", params);
-        resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );
 
         // Test for accessing _score.floatValue()
         script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.floatValue()))", params);
-        resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );
 
         // Test for accessing _score.doubleValue()
         script = new Script(ScriptType.INLINE, NAME, "log(doc['index'].value + (factor * _score.doubleValue()))", params);
-        resp = prepareSearch("test").setQuery(
-            functionScoreQuery(
-                matchQuery("body", "foo"),
-                new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
-                    new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
-            )
-        ).get();
-        assertNoFailures(resp);
-        firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getScore(), greaterThan(1f));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(
+                    matchQuery("body", "foo"),
+                    new FunctionScoreQueryBuilder.FilterFunctionBuilder[] {
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(fieldValueFactorFunction("index").factor(2)),
+                        new FunctionScoreQueryBuilder.FilterFunctionBuilder(scriptFunction(script)) }
+                )
+            ),
+            response -> {
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getScore(), greaterThan(1f));
+            }
+        );
     }
 
     public void testSeedReportedInExplain() throws Exception {
@@ -243,28 +262,33 @@ public void testSeedReportedInExplain() throws Exception {
 
         int seed = 12345678;
 
-        SearchResponse resp = prepareSearch("test").setQuery(
-            functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField(SeqNoFieldMapper.NAME))
-        ).setExplain(true).get();
-        assertNoFailures(resp);
-        assertEquals(1, resp.getHits().getTotalHits().value);
-        SearchHit firstHit = resp.getHits().getAt(0);
-        assertThat(firstHit.getExplanation().toString(), containsString("" + seed));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction().seed(seed).setField(SeqNoFieldMapper.NAME)))
+                .setExplain(true),
+            response -> {
+                assertNoFailures(response);
+                assertEquals(1, response.getHits().getTotalHits().value);
+                SearchHit firstHit = response.getHits().getAt(0);
+                assertThat(firstHit.getExplanation().toString(), containsString("" + seed));
+            }
+        );
     }
 
     public void testNoDocs() throws Exception {
         createIndex("test");
         ensureGreen();
 
-        SearchResponse resp = prepareSearch("test").setQuery(
-            functionScoreQuery(matchAllQuery(), randomFunction().seed(1234).setField(SeqNoFieldMapper.NAME))
-        ).get();
-        assertNoFailures(resp);
-        assertEquals(0, resp.getHits().getTotalHits().value);
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(
+                functionScoreQuery(matchAllQuery(), randomFunction().seed(1234).setField(SeqNoFieldMapper.NAME))
+            ),
+            response -> assertEquals(0, response.getHits().getTotalHits().value)
+        );
 
-        resp = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction())).get();
-        assertNoFailures(resp);
-        assertEquals(0, resp.getHits().getTotalHits().value);
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), randomFunction())),
+            response -> assertEquals(0, response.getHits().getTotalHits().value)
+        );
     }
 
     public void testScoreRange() throws Exception {
@@ -280,14 +304,14 @@ public void testScoreRange() throws Exception {
         refresh();
         int iters = scaledRandomIntBetween(10, 20);
         for (int i = 0; i < iters; ++i) {
-            SearchResponse searchResponse = prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), randomFunction()))
-                .setSize(docCount)
-                .get();
-
-            assertNoFailures(searchResponse);
-            for (SearchHit hit : searchResponse.getHits().getHits()) {
-                assertThat(hit.getScore(), allOf(greaterThanOrEqualTo(0.0f), lessThanOrEqualTo(1.0f)));
-            }
+            assertNoFailuresAndResponse(
+                prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), randomFunction())).setSize(docCount),
+                response -> {
+                    for (SearchHit hit : response.getHits().getHits()) {
+                        assertThat(hit.getScore(), allOf(greaterThanOrEqualTo(0.0f), lessThanOrEqualTo(1.0f)));
+                    }
+                }
+            );
         }
     }
 
@@ -338,10 +362,10 @@ public void checkDistribution() throws Exception {
         for (int i = 0; i < count; i++) {
-            SearchResponse searchResponse = prepareSearch().setQuery(functionScoreQuery(matchAllQuery(), new RandomScoreFunctionBuilder()))
-                .get();
-
-            matrix[Integer.valueOf(searchResponse.getHits().getAt(0).getId())]++;
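+            // the captured matrix array is shared by reference, so the one-expression lambda can bump the counter for the top hit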
client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("name", "New York") @@ -60,8 +59,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 5.286 km - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("name", "Times Square") @@ -72,8 +70,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 0.4621 km - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("name", "Tribeca") @@ -84,8 +81,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 1.055 km - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource( jsonBuilder().startObject() .field("name", "Wall Street") @@ -96,8 +92,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 1.258 km - client().prepareIndex("test") - .setId("5") + prepareIndex("test").setId("5") .setSource( jsonBuilder().startObject() .field("name", "Soho") @@ -108,8 +103,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 2.029 km - client().prepareIndex("test") - .setId("6") + prepareIndex("test").setId("6") .setSource( jsonBuilder().startObject() .field("name", "Greenwich Village") @@ -120,8 +114,7 @@ protected void setupSuiteScopeCluster() throws Exception { .endObject() ), // to NY: 8.572 km - client().prepareIndex("test") - .setId("7") + prepareIndex("test").setId("7") .setSource( jsonBuilder().startObject() .field("name", "Brooklyn") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index 415de06030938..d79bb903bdb6a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; @@ -41,8 +40,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -225,26 +226,36 @@ public void testMoreLikeThisWithAliases() throws Exception { ); 
logger.info("Running moreLikeThis on beta shard"); - SearchResponse response = prepareSearch("beta").setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - + assertResponse( + prepareSearch("beta").setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); logger.info("Running moreLikeThis on release shard"); - response = prepareSearch("release").setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertResponse( + prepareSearch("release").setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + } + ); logger.info("Running moreLikeThis on alias with node client"); - response = internalCluster().coordOnlyNodeClient() - .prepareSearch("beta") - .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1)) - .get(); - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertResponse( + internalCluster().coordOnlyNodeClient() + .prepareSearch("beta") + .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1)), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); } // Issue #14944 @@ -267,17 +278,20 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { ).actionGet(); refresh(indexName); - SearchResponse response = prepareSearch().setQuery( - new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1) - ).get(); - assertHitCount(response, 2L); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery( + new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1) + ), + response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + } + ); } public void testMoreLikeThisIssue2197() throws Exception { indicesAdmin().prepareCreate("foo").get(); - client().prepareIndex("foo") - .setId("1") + prepareIndex("foo").setId("1") .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject().endObject()) .get(); indicesAdmin().prepareRefresh("foo").get(); @@ -292,8 +306,7 @@ public void testMoreLikeWithCustomRouting() throws Exception { indicesAdmin().prepareCreate("foo").get(); ensureGreen(); - client().prepareIndex("foo") - .setId("1") + prepareIndex("foo").setId("1") .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject().endObject()) .setRouting("2") .get(); @@ -307,8 +320,7 @@ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception { assertAcked(prepareCreate("foo", 2, indexSettings(2, 0))); ensureGreen(); - client().prepareIndex("foo") - .setId("1") + prepareIndex("foo").setId("1") 
             .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject().endObject())
             .setRouting("4000")
             .get();
@@ -334,12 +346,10 @@ public void testNumericField() throws Exception {
                 .endObject()
         ).get();
         ensureGreen();
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(jsonBuilder().startObject().field("string_value", "lucene index").field("int_value", 1).endObject())
             .get();
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
             .setSource(jsonBuilder().startObject().field("string_value", "elasticsearch index").field("int_value", 42).endObject())
             .get();
 
@@ -547,9 +557,9 @@ public void testSimpleMoreLikeThisIds() throws Exception {
         logger.info("Indexing...");
         List<IndexRequestBuilder> builders = new ArrayList<>();
-        builders.add(client().prepareIndex("test").setSource("text", "lucene").setId("1"));
-        builders.add(client().prepareIndex("test").setSource("text", "lucene release").setId("2"));
-        builders.add(client().prepareIndex("test").setSource("text", "apache lucene").setId("3"));
+        builders.add(prepareIndex("test").setSource("text", "lucene").setId("1"));
+        builders.add(prepareIndex("test").setSource("text", "lucene release").setId("2"));
+        builders.add(prepareIndex("test").setSource("text", "apache lucene").setId("3"));
         indexRandom(true, builders);
 
         logger.info("Running MoreLikeThis");
@@ -573,10 +583,10 @@ public void testMoreLikeThisMultiValueFields() throws Exception {
         String[] values = { "aaaa", "bbbb", "cccc", "dddd", "eeee", "ffff", "gggg", "hhhh", "iiii", "jjjj" };
         List<IndexRequestBuilder> builders = new ArrayList<>(values.length + 1);
         // index one document with all the values
-        builders.add(client().prepareIndex("test").setId("0").setSource("text", values));
+        builders.add(prepareIndex("test").setId("0").setSource("text", values));
         // index each document with only one of the values
         for (int i = 0; i < values.length; i++) {
-            builders.add(client().prepareIndex("test").setId(String.valueOf(i + 1)).setSource("text", values[i]));
+            builders.add(prepareIndex("test").setId(String.valueOf(i + 1)).setSource("text", values[i]));
         }
         indexRandom(true, builders);
 
@@ -608,7 +618,7 @@ public void testMinimumShouldMatch() throws ExecutionException, InterruptedExcep
             for (int j = 1; j <= 10 - i; j++) {
                 text += j + " ";
             }
-            builders.add(client().prepareIndex("test").setId(i + "").setSource("text", text));
+            builders.add(prepareIndex("test").setId(i + "").setSource("text", text));
         }
         indexRandom(true, builders);
 
@@ -620,13 +630,14 @@ public void testMinimumShouldMatch() throws ExecutionException, InterruptedExcep
                 .minDocFreq(1)
                 .minimumShouldMatch(minimumShouldMatch);
             logger.info("Testing with minimum_should_match = {}", minimumShouldMatch);
-            SearchResponse response = prepareSearch("test").setQuery(mltQuery).get();
-            assertNoFailures(response);
-            if (minimumShouldMatch.equals("0%")) {
-                assertHitCount(response, 10);
-            } else {
-                assertHitCount(response, 11 - i);
-            }
+            final int finalI = i;
+            assertNoFailuresAndResponse(prepareSearch("test").setQuery(mltQuery), response -> {
+                if (minimumShouldMatch.equals("0%")) {
+                    assertHitCount(response, 10);
+                } else {
+                    assertHitCount(response, 11 - finalI);
+                }
+            });
         }
     }
 
@@ -642,7 +653,7 @@ public void testMoreLikeThisArtificialDocs() throws Exception {
             doc.field("field" + i, generateRandomStringArray(5, 10, false) + "a"); // make sure they are not all empty
         }
         doc.endObject();
-        indexRandom(true, client().prepareIndex("test").setId("0").setSource(doc));
+        indexRandom(true, prepareIndex("test").setId("0").setSource(doc));
prepareIndex("test").setId("0").setSource(doc)); logger.info("Checking the document matches ..."); // routing to ensure we hit the shard with the doc @@ -661,8 +672,7 @@ public void testMoreLikeThisMalformedArtificialDocs() throws Exception { logger.info("Creating an index with a single document ..."); indexRandom( true, - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "Hello World!").field("date", "2009-01-01").endObject()) ); @@ -707,7 +717,7 @@ public void testMoreLikeThisUnlike() throws InterruptedException, IOException { logger.info("Indexing each field value of this document as a single document."); List builders = new ArrayList<>(); for (int i = 0; i < numFields; i++) { - builders.add(client().prepareIndex("test").setId(i + "").setSource("field" + i, i + "")); + builders.add(prepareIndex("test").setId(i + "").setSource("field" + i, i + "")); } indexRandom(true, builders); @@ -738,11 +748,9 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt indexRandom( true, - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("text", "hello world").field("text1", "elasticsearch").endObject()), - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("text", "goodby moon").field("text1", "elasticsearch").endObject()) ); @@ -760,9 +768,9 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt } public void testWithRouting() throws IOException { - client().prepareIndex("index").setId("1").setRouting("3").setSource("text", "this is a document").get(); - client().prepareIndex("index").setId("2").setRouting("1").setSource("text", "this is another document").get(); - client().prepareIndex("index").setId("3").setRouting("4").setSource("text", "this is yet another document").get(); + prepareIndex("index").setId("1").setRouting("3").setSource("text", "this is a document").get(); + prepareIndex("index").setId("2").setRouting("1").setSource("text", "this is another document").get(); + prepareIndex("index").setId("3").setRouting("4").setSource("text", "this is yet another document").get(); refresh("index"); Item item = new Item("index", "2").routing("1"); @@ -773,8 +781,7 @@ public void testWithRouting() throws IOException { ); moreLikeThisQueryBuilder.minTermFreq(1); moreLikeThisQueryBuilder.minDocFreq(1); - SearchResponse searchResponse = prepareSearch("index").setQuery(moreLikeThisQueryBuilder).get(); - assertEquals(2, searchResponse.getHits().getTotalHits().value); + assertHitCount(prepareSearch("index").setQuery(moreLikeThisQueryBuilder), 2L); } // Issue #29678 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java index aa418288b8ebf..7072594eab8ec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.MultiSearchResponse.Item; import org.elasticsearch.common.settings.Settings; import 
 import org.elasticsearch.search.DummyQueryBuilder;
@@ -23,6 +23,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
 import static org.hamcrest.Matchers.equalTo;
@@ -39,31 +40,33 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
     public void testSimpleMultiSearch() {
         createIndex("test");
         ensureGreen();
-        client().prepareIndex("test").setId("1").setSource("field", "xxx").get();
-        client().prepareIndex("test").setId("2").setSource("field", "yyy").get();
+        prepareIndex("test").setId("1").setSource("field", "xxx").get();
+        prepareIndex("test").setId("2").setSource("field", "yyy").get();
         refresh();
-        MultiSearchResponse response = client().prepareMultiSearch()
-            .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx")))
-            .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy")))
-            .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
-            .get();
-
-        for (MultiSearchResponse.Item item : response) {
-            assertNoFailures(item.getResponse());
-        }
-        assertThat(response.getResponses().length, equalTo(3));
-        assertHitCount(response.getResponses()[0].getResponse(), 1L);
-        assertHitCount(response.getResponses()[1].getResponse(), 1L);
-        assertHitCount(response.getResponses()[2].getResponse(), 2L);
-        assertFirstHit(response.getResponses()[0].getResponse(), hasId("1"));
-        assertFirstHit(response.getResponses()[1].getResponse(), hasId("2"));
+        assertResponse(
+            client().prepareMultiSearch()
+                .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx")))
+                .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy")))
+                .add(prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())),
+            response -> {
+                for (Item item : response) {
+                    assertNoFailures(item.getResponse());
+                }
+                assertThat(response.getResponses().length, equalTo(3));
+                assertHitCount(response.getResponses()[0].getResponse(), 1L);
+                assertHitCount(response.getResponses()[1].getResponse(), 1L);
+                assertHitCount(response.getResponses()[2].getResponse(), 2L);
+                assertFirstHit(response.getResponses()[0].getResponse(), hasId("1"));
+                assertFirstHit(response.getResponses()[1].getResponse(), hasId("2"));
+            }
+        );
     }

-    public void testSimpleMultiSearchMoreRequests() {
+    public void testSimpleMultiSearchMoreRequests() throws Exception {
         createIndex("test");
         int numDocs = randomIntBetween(0, 16);
         for (int i = 0; i < numDocs; i++) {
-            client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get();
+            prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get();
         }
         refresh();
@@ -75,13 +78,13 @@ public void testSimpleMultiSearchMoreRequests() {
         for (int i = 0; i < numSearchRequests; i++) {
             request.add(prepareSearch("test"));
         }
-
-        MultiSearchResponse response = client().multiSearch(request).actionGet();
-        assertThat(response.getResponses().length, equalTo(numSearchRequests));
-        for (MultiSearchResponse.Item item : response) {
-            assertNoFailures(item.getResponse());
-            assertHitCount(item.getResponse(), numDocs);
-        }
+        assertResponse(client().multiSearch(request), response -> {
+            assertThat(response.getResponses().length, equalTo(numSearchRequests));
+            for (Item item : response) {
+                assertNoFailures(item.getResponse());
+                assertHitCount(item.getResponse(), numDocs);
+            }
+        });
     }

     /**
@@ -92,26 +95,28 @@ public void testCCSCheckCompatibility() throws Exception {
         TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true);
         createIndex("test");
         ensureGreen();
-        client().prepareIndex("test").setId("1").setSource("field", "xxx").get();
-        client().prepareIndex("test").setId("2").setSource("field", "yyy").get();
+        prepareIndex("test").setId("1").setSource("field", "xxx").get();
+        prepareIndex("test").setId("2").setSource("field", "yyy").get();
         refresh();
-        MultiSearchResponse response = client().prepareMultiSearch()
-            .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx")))
-            .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy")))
-            .add(prepareSearch("test").setQuery(new DummyQueryBuilder() {
-                @Override
-                public TransportVersion getMinimalSupportedVersion() {
-                    return transportVersion;
-                }
-            }))
-            .get();
-
-        assertThat(response.getResponses().length, equalTo(3));
-        assertHitCount(response.getResponses()[0].getResponse(), 1L);
-        assertHitCount(response.getResponses()[1].getResponse(), 1L);
-        assertTrue(response.getResponses()[2].isFailure());
-        assertTrue(
-            response.getResponses()[2].getFailure().getMessage().contains("the 'search.check_ccs_compatibility' setting is enabled")
+        assertResponse(
+            client().prepareMultiSearch()
+                .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx")))
+                .add(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy")))
+                .add(prepareSearch("test").setQuery(new DummyQueryBuilder() {
+                    @Override
+                    public TransportVersion getMinimalSupportedVersion() {
+                        return transportVersion;
+                    }
+                })),
+            response -> {
+                assertThat(response.getResponses().length, equalTo(3));
+                assertHitCount(response.getResponses()[0].getResponse(), 1L);
+                assertHitCount(response.getResponses()[1].getResponse(), 1L);
+                assertTrue(response.getResponses()[2].isFailure());
+                assertTrue(
+                    response.getResponses()[2].getFailure().getMessage().contains("the 'search.check_ccs_compatibility' setting is enabled")
+                );
+            }
         );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java
index e238a254b7843..245fb1651f4d6 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/NestedWithMinScoreIT.java
@@ -95,7 +95,7 @@ public void testNestedWithMinScore() throws Exception {
         doc.endArray();
         doc.endObject();

-        client().prepareIndex("test").setId("d1").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).setSource(doc).get();
+        prepareIndex("test").setId("d1").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).setSource(doc).get();
         final BoolQueryBuilder childQuery = new BoolQueryBuilder().filter(
             new MatchPhraseQueryBuilder("toolTracks.data", "cash dispenser, automated teller machine, automatic teller machine")
         ).filter(new RangeQueryBuilder("toolTracks.confidence").from(0.8));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java
index 736796d73f164..29a3e589e7923 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java
@@ -16,7 +16,6 @@
 import org.elasticsearch.action.delete.DeleteResponse;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.settings.Settings;
@@ -37,7 +36,10 @@
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -51,13 +53,10 @@ public void testSimpleNested() throws Exception {
         ensureGreen();

         // check on no data, see it works
-        SearchResponse searchResponse = prepareSearch("test").get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
-        searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
+        assertHitCount(prepareSearch("test"), 0L);
+        assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L);

-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", "value1")
@@ -78,36 +77,31 @@ public void testSimpleNested() throws Exception {
         waitForRelocation(ClusterHealthStatus.GREEN);
         GetResponse getResponse = client().prepareGet("test", "1").get();
         assertThat(getResponse.isExists(), equalTo(true));
-        assertThat(getResponse.getSourceAsBytes(), notNullValue());
+        assertThat(getResponse.getSourceAsBytesRef(), notNullValue());
         refresh();
         // check the numDocs
         assertDocumentCount("test", 3);

-        searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
+        assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L);

         // search for something that matches the nested doc, and see that we don't find the nested doc
-        searchResponse = prepareSearch("test").setQuery(matchAllQuery()).get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
-        searchResponse = prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get();
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
+        assertHitCount(prepareSearch("test"), 1L);
+        assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L);

         // now, do a nested query
-        searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg))
-            .get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
-
-        searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg))
-            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
-            .get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)),
+            1L
+        );
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg))
+                .setSearchType(SearchType.DFS_QUERY_THEN_FETCH),
+            1L
+        );

         // add another doc, one that would match if it was not nested...
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", "value1")
@@ -128,40 +122,43 @@ public void testSimpleNested() throws Exception {
         refresh();
         assertDocumentCount("test", 6);

-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery(
-                "nested1",
-                boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")),
-                ScoreMode.Avg
-            )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                nestedQuery(
+                    "nested1",
+                    boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")),
+                    ScoreMode.Avg
+                )
+            ),
+            1L
+        );

         // filter
-        searchResponse = prepareSearch("test").setQuery(
-            boolQuery().must(matchAllQuery())
-                .mustNot(
-                    nestedQuery(
-                        "nested1",
-                        boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")),
-                        ScoreMode.Avg
-                    )
-                )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                boolQuery().must(matchAllQuery())
+                    .mustNot(
+                        nestedQuery(
+                            "nested1",
+                            boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")),
+                            ScoreMode.Avg
+                        )
+                    )
+            ),
+            1L
+        );

         // check with type prefix
-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery(
-                "nested1",
-                boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")),
-                ScoreMode.Avg
-            )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                nestedQuery(
+                    "nested1",
+                    boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")),
+                    ScoreMode.Avg
+                )
+            ),
+            1L
+        );

         // check delete, so all is gone...
         DeleteResponse deleteResponse = client().prepareDelete("test", "2").get();
@@ -170,10 +168,10 @@ public void testSimpleNested() throws Exception {
         refresh();
         assertDocumentCount("test", 3);

-        searchResponse = prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg))
-            .get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)),
+            1L
+        );
     }

     public void testMultiNested() throws Exception {
@@ -197,8 +195,7 @@ public void testMultiNested() throws Exception {
         );
         ensureGreen();

-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field", "value")
@@ -238,83 +235,87 @@ public void testMultiNested() throws Exception {
         assertDocumentCount("test", 7);

         // do some multi nested queries
-        SearchResponse searchResponse = prepareSearch("test").setQuery(
-            nestedQuery("nested1", termQuery("nested1.field1", "1"), ScoreMode.Avg)
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
-
-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
-
-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery(
-                "nested1",
-                boolQuery().must(termQuery("nested1.field1", "1"))
-                    .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)),
-                ScoreMode.Avg
-            )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
-
-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery(
-                "nested1",
-                boolQuery().must(termQuery("nested1.field1", "1"))
-                    .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"), ScoreMode.Avg)),
-                ScoreMode.Avg
-            )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
-
-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery(
-                "nested1",
-                boolQuery().must(termQuery("nested1.field1", "1"))
-                    .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"), ScoreMode.Avg)),
-                ScoreMode.Avg
-            )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
-
-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery(
-                "nested1",
-                boolQuery().must(termQuery("nested1.field1", "1"))
-                    .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)),
-                ScoreMode.Avg
-            )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
-
-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery(
-                "nested1",
-                boolQuery().must(termQuery("nested1.field1", "4"))
-                    .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)),
-                ScoreMode.Avg
-            )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
-
-        searchResponse = prepareSearch("test").setQuery(
-            nestedQuery(
-                "nested1",
-                boolQuery().must(termQuery("nested1.field1", "4"))
-                    .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)),
-                ScoreMode.Avg
-            )
-        ).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.field1", "1"), ScoreMode.Avg)),
+            1L
+        );
+
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)),
+            1L
+        );
+
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                nestedQuery(
+                    "nested1",
+                    boolQuery().must(termQuery("nested1.field1", "1"))
+                        .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)),
+                    ScoreMode.Avg
+                )
+            ),
+            1L
+        );
+
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                nestedQuery(
+                    "nested1",
+                    boolQuery().must(termQuery("nested1.field1", "1"))
+                        .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"), ScoreMode.Avg)),
+                    ScoreMode.Avg
+                )
+            ),
+            1L
+        );
+
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                nestedQuery(
+                    "nested1",
+                    boolQuery().must(termQuery("nested1.field1", "1"))
+                        .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"), ScoreMode.Avg)),
+                    ScoreMode.Avg
+                )
+            ),
+            0L
+        );
+
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                nestedQuery(
+                    "nested1",
+                    boolQuery().must(termQuery("nested1.field1", "1"))
+                        .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)),
+                    ScoreMode.Avg
+                )
+            ),
+            0L
+        );
+
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                nestedQuery(
+                    "nested1",
+                    boolQuery().must(termQuery("nested1.field1", "4"))
+                        .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"), ScoreMode.Avg)),
+                    ScoreMode.Avg
+                )
+            ),
+            1L
+        );
+
+        assertHitCountAndNoFailures(
+            prepareSearch("test").setQuery(
+                nestedQuery(
+                    "nested1",
+                    boolQuery().must(termQuery("nested1.field1", "4"))
+                        .must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"), ScoreMode.Avg)),
+                    ScoreMode.Avg
+                )
+            ),
+            0L
+        );
     }

     // When IncludeNestedDocsQuery is wrapped in a FilteredQuery then a in-finite loop occurs b/c of a bug in
@@ -343,8 +344,7 @@ public void testDeleteNestedDocsWithAlias() throws Exception {

         ensureGreen();

-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", "value1")
@@ -362,8 +362,7 @@ public void testDeleteNestedDocsWithAlias() throws Exception {
             )
             .get();

-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", "value2")
@@ -403,8 +402,7 @@ public void testExplain() throws Exception {

         ensureGreen();

-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", "value1")
@@ -421,14 +419,17 @@ public void testExplain() throws Exception {
             .setRefreshPolicy(IMMEDIATE)
             .get();

-        SearchResponse searchResponse = prepareSearch("test").setQuery(
-            nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)
-        ).setExplain(true).get();
-        assertNoFailures(searchResponse);
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L));
-        Explanation explanation = searchResponse.getHits().getHits()[0].getExplanation();
-        assertThat(explanation.getValue(), equalTo(searchResponse.getHits().getHits()[0].getScore()));
-        assertThat(explanation.toString(), startsWith("0.36464313 = Score based on 2 child docs in range from 0 to 1"));
+        assertNoFailuresAndResponse(
+            prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total))
+                .setExplain(true),
+            response -> {
+                assertNoFailures(response);
+                assertThat(response.getHits().getTotalHits().value, equalTo(1L));
+                Explanation explanation = response.getHits().getHits()[0].getExplanation();
+                assertThat(explanation.getValue(), equalTo(response.getHits().getHits()[0].getScore()));
+                assertThat(explanation.toString(), startsWith("0.36464313 = Score based on 2 child docs in range from 0 to 1"));
+            }
+        );
     }

     public void testSimpleNestedSorting() throws Exception {
@@ -454,8 +455,7 @@ public void testSimpleNestedSorting() throws Exception {
         );
         ensureGreen();

-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", 1)
@@ -470,8 +470,7 @@ public void testSimpleNestedSorting() throws Exception {
                     .endObject()
             )
             .get();
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", 2)
@@ -486,8 +485,7 @@ public void testSimpleNestedSorting() throws Exception {
                     .endObject()
             )
             .get();
-        client().prepareIndex("test")
-            .setId("3")
+        prepareIndex("test").setId("3")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", 3)
@@ -504,33 +502,32 @@ public void testSimpleNestedSorting() throws Exception {
             .get();
         refresh();

-        SearchResponse searchResponse = prepareSearch("test")
-
-            .setQuery(QueryBuilders.matchAllQuery())
-            .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC).setNestedSort(new NestedSortBuilder("nested1")))
-            .get();
-
-        assertHitCount(searchResponse, 3);
-        assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1"));
-        assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3"));
-        assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("3"));
-        assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("4"));
-
-        searchResponse = prepareSearch("test")
-
-            .setQuery(QueryBuilders.matchAllQuery())
-            .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC).setNestedSort(new NestedSortBuilder("nested1")))
-            .get();
-
-        assertHitCount(searchResponse, 3);
-        assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("5"));
-        assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3"));
-        assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4"));
-        assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2"));
+        assertResponse(
+            prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+                .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC).setNestedSort(new NestedSortBuilder("nested1"))),
+            response -> {
+                assertHitCount(response, 3);
+                assertThat(response.getHits().getHits()[0].getId(), equalTo("2"));
+                assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1"));
+                assertThat(response.getHits().getHits()[1].getId(), equalTo("3"));
+                assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("3"));
+                assertThat(response.getHits().getHits()[2].getId(), equalTo("1"));
+                assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("4"));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+                .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC).setNestedSort(new NestedSortBuilder("nested1"))),
+            response -> {
+                assertHitCount(response, 3);
+                assertThat(response.getHits().getHits()[0].getId(), equalTo("1"));
+                assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("5"));
+                assertThat(response.getHits().getHits()[1].getId(), equalTo("3"));
+                assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4"));
+                assertThat(response.getHits().getHits()[2].getId(), equalTo("2"));
+                assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2"));
+            }
+        );
     }

     public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception {
@@ -558,8 +555,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception {
         );
         ensureGreen();

-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", 1)
@@ -576,8 +572,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception {
                     .endObject()
             )
             .get();
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", 2)
@@ -596,8 +591,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception {
             .get();
         // Doc with missing nested docs if nested filter is used
         refresh();
-        client().prepareIndex("test")
-            .setId("3")
+        prepareIndex("test").setId("3")
             .setSource(
                 jsonBuilder().startObject()
                     .field("field1", 3)
@@ -628,16 +622,15 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception {
             searchRequestBuilder.setScroll("10m");
         }

-        SearchResponse searchResponse = searchRequestBuilder.get();
-
-        assertHitCount(searchResponse, 3);
-        assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1"));
-        assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4"));
-        assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
-        assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("10"));
-
+        assertResponse(searchRequestBuilder, response -> {
+            assertHitCount(response, 3);
+            assertThat(response.getHits().getHits()[0].getId(), equalTo("2"));
+            assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1"));
+            assertThat(response.getHits().getHits()[1].getId(), equalTo("1"));
+            assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4"));
+            assertThat(response.getHits().getHits()[2].getId(), equalTo("3"));
+            assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("10"));
+        });
         searchRequestBuilder = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
             .addSort(
                 SortBuilders.fieldSort("nested1.field1")
@@ -650,16 +643,16 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception {
             searchRequestBuilder.setScroll("10m");
         }

-        searchResponse = searchRequestBuilder.get();
-
-        assertHitCount(searchResponse, 3);
-        assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
-        assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("10"));
-        assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("5"));
-        assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2"));
-        client().prepareClearScroll().addScrollId("_all").get();
+        assertResponse(searchRequestBuilder, response -> {
+            assertHitCount(response, 3);
+            assertThat(response.getHits().getHits()[0].getId(), equalTo("3"));
+            assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("10"));
+            assertThat(response.getHits().getHits()[1].getId(), equalTo("1"));
+            assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("5"));
+            assertThat(response.getHits().getHits()[2].getId(), equalTo("2"));
+            assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2"));
+            client().prepareClearScroll().addScrollId("_all").get();
+        });
     }

     public void testNestedSortWithMultiLevelFiltering() throws Exception {
@@ -689,7 +682,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception {
             }"""));
         ensureGreen();

-        client().prepareIndex("test").setId("1").setSource("""
+        prepareIndex("test").setId("1").setSource("""
             {
               "acl": [
                 {
@@ -739,7 +732,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception {
              ]
             }""", XContentType.JSON).get();

-        client().prepareIndex("test").setId("2").setSource("""
+        prepareIndex("test").setId("2").setSource("""
             {
               "acl": [
                 {
@@ -788,101 +781,106 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception {
         refresh();

         // access id = 1, read, max value, asc, should use matt and shay
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .addSort(
-                SortBuilders.fieldSort("acl.operation.user.username")
-                    .setNestedSort(
-                        new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1"))
-                            .setNestedSort(
-                                new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read"))
-                                    .setNestedSort(new NestedSortBuilder("acl.operation.user"))
-                            )
-                    )
-                    .sortMode(SortMode.MAX)
-                    .order(SortOrder.ASC)
-            )
-            .get();
-
-        assertHitCount(searchResponse, 2);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(2));
-        assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("matt"));
-        assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("shay"));
-
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .addSort(
+                    SortBuilders.fieldSort("acl.operation.user.username")
+                        .setNestedSort(
+                            new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1"))
+                                .setNestedSort(
+                                    new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read"))
+                                        .setNestedSort(new NestedSortBuilder("acl.operation.user"))
+                                )
+                        )
+                        .sortMode(SortMode.MAX)
+                        .order(SortOrder.ASC)
+                ),
+            response -> {
+                assertHitCount(response, 2);
+                assertThat(response.getHits().getHits().length, equalTo(2));
+                assertThat(response.getHits().getHits()[0].getId(), equalTo("2"));
+                assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("matt"));
+                assertThat(response.getHits().getHits()[1].getId(), equalTo("1"));
+                assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("shay"));
+            }
+        );
         // access id = 1, read, min value, asc, should now use adrien and luca
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .addSort(
-                SortBuilders.fieldSort("acl.operation.user.username")
-                    .setNestedSort(
-                        new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1"))
-                            .setNestedSort(
-                                new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read"))
-                                    .setNestedSort(new NestedSortBuilder("acl.operation.user"))
-                            )
-                    )
-                    .sortMode(SortMode.MIN)
-                    .order(SortOrder.ASC)
-            )
-            .get();
-
-        assertHitCount(searchResponse, 2);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(2));
-        assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
-        assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("adrien"));
-        assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("luca"));
-
-        // execute, by matt or luca, by user id, sort missing first
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .addSort(
-                SortBuilders.fieldSort("acl.operation.user.id")
-                    .setNestedSort(
-                        new NestedSortBuilder("acl").setNestedSort(
-                            new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute"))
-                                .setNestedSort(
-                                    new NestedSortBuilder("acl.operation.user").setFilter(
-                                        QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca")
-                                    )
-                                )
-                        )
-                    )
-                    .missing("_first")
-                    .sortMode(SortMode.MIN)
-                    .order(SortOrder.DESC)
-            )
-            .get();
-
-        assertHitCount(searchResponse, 2);
-        assertThat(searchResponse.getHits().getHits().length, equalTo(2));
-        assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); // missing first
-        assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
-        assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("1"));
-
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .addSort(
+                    SortBuilders.fieldSort("acl.operation.user.username")
+                        .setNestedSort(
+                            new NestedSortBuilder("acl").setFilter(QueryBuilders.termQuery("acl.access_id", "1"))
+                                .setNestedSort(
+                                    new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "read"))
+                                        .setNestedSort(new NestedSortBuilder("acl.operation.user"))
+                                )
+                        )
+                        .sortMode(SortMode.MIN)
+                        .order(SortOrder.ASC)
+                ),
+            response -> {
+                assertHitCount(response, 2);
+                assertThat(response.getHits().getHits().length, equalTo(2));
+                assertThat(response.getHits().getHits()[0].getId(), equalTo("1"));
+                assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("adrien"));
+                assertThat(response.getHits().getHits()[1].getId(), equalTo("2"));
+                assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("luca"));
+            }
+        );
+        // execute, by matt or luca, by user id, sort missing first
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .addSort(
SortBuilders.fieldSort("acl.operation.user.id") + .setNestedSort( + new NestedSortBuilder("acl").setNestedSort( + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) + .setNestedSort( + new NestedSortBuilder("acl.operation.user").setFilter( + QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + ) + ) + ) + ) + .missing("_first") + .sortMode(SortMode.MIN) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); // missing first + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("1")); + } + ); // execute, by matt or luca, by username, sort missing last (default) - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("acl.operation.user.username") - .setNestedSort( - new NestedSortBuilder("acl").setNestedSort( - new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) - .setNestedSort( - new NestedSortBuilder("acl.operation.user").setFilter( - QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("acl.operation.user.username") + .setNestedSort( + new NestedSortBuilder("acl").setNestedSort( + new NestedSortBuilder("acl.operation").setFilter(QueryBuilders.termQuery("acl.operation.name", "execute")) + .setNestedSort( + new NestedSortBuilder("acl.operation.user").setFilter( + QueryBuilders.termsQuery("acl.operation.user.username", "matt", "luca") + ) ) - ) + ) ) - ) - .sortMode(SortMode.MIN) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 2); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("luca")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("1")); // missing last + .sortMode(SortMode.MIN) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("luca")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("1")); // missing last + } + ); } // https://github.com/elastic/elasticsearch/issues/31554 @@ -914,7 +912,7 @@ public void testLeakingSortValues() throws Exception { """)); ensureGreen(); - client().prepareIndex("test").setId("1").setSource(""" + prepareIndex("test").setId("1").setSource(""" { "nested1": [ { @@ -928,7 +926,7 @@ public void testLeakingSortValues() throws Exception { ] }""", XContentType.JSON).get(); - client().prepareIndex("test").setId("2").setSource(""" + prepareIndex("test").setId("2").setSource(""" { "nested1": [ { @@ -944,22 +942,25 @@ public void testLeakingSortValues() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(termQuery("_id", 2)) - .addSort( - SortBuilders.fieldSort("nested1.nested2.sortVal") - .setNestedSort( - new NestedSortBuilder("nested1").setNestedSort( - new 
NestedSortBuilder("nested1.nested2").setFilter(termQuery("nested1.nested2.nested2_keyword", "nested2_bar")) + assertResponse( + prepareSearch().setQuery(termQuery("_id", 2)) + .addSort( + SortBuilders.fieldSort("nested1.nested2.sortVal") + .setNestedSort( + new NestedSortBuilder("nested1").setNestedSort( + new NestedSortBuilder("nested1.nested2").setFilter( + termQuery("nested1.nested2.nested2_keyword", "nested2_bar") + ) + ) ) - ) - ) - .get(); - - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); - + ), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); + } + ); } public void testSortNestedWithNestedFilter() throws Exception { @@ -996,8 +997,7 @@ public void testSortNestedWithNestedFilter() throws Exception { ensureGreen(); // sum: 11 - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("grand_parent_values", 1L) @@ -1039,8 +1039,7 @@ public void testSortNestedWithNestedFilter() throws Exception { .get(); // sum: 7 - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("grand_parent_values", 2L) @@ -1082,8 +1081,7 @@ public void testSortNestedWithNestedFilter() throws Exception { .get(); // sum: 2 - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .field("grand_parent_values", 3L) @@ -1126,215 +1124,236 @@ public void testSortNestedWithNestedFilter() throws Exception { refresh(); // Without nested filter - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .order(SortOrder.ASC) - ) - .get(); - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("-3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("-2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("-1")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("-3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("-2")); + assertThat(response.getHits().getHits()[2].getId(), 
equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("-1")); + } + ); // With nested filter NestedSortBuilder nestedSort = new NestedSortBuilder("parent.child"); nestedSort.setFilter(QueryBuilders.termQuery("parent.child.filter", true)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Nested path should be automatically detected, expect same results as above search request - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); 
nestedSort.setFilter(QueryBuilders.termQuery("parent.filter", false)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.parent_values").setNestedSort(nestedSort).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort( - new NestedSortBuilder("parent").setFilter(QueryBuilders.termQuery("parent.filter", false)) - .setNestedSort(new NestedSortBuilder("parent.child")) - ) - .sortMode(SortMode.MAX) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("6")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("parent.parent_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent").setFilter(QueryBuilders.termQuery("parent.filter", false)) + .setNestedSort(new NestedSortBuilder("parent.child")) + ) + .sortMode(SortMode.MAX) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("4")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + 
assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("6")); + } + ); // Check if closest nested type is resolved - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_obj.value") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_obj.value") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Sort mode: sum - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.SUM) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("11")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.SUM) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("11")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); - 
assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.SUM) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("11")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.SUM) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("11")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("7")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); + } + ); // Sort mode: sum with filter - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .sortMode(SortMode.SUM) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .sortMode(SortMode.SUM) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), 
equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); // Sort mode: avg - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child")) - .sortMode(SortMode.AVG) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("1")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort(new NestedSortBuilder("parent.child")) + .sortMode(SortMode.AVG) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("3")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + 
assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("1")); + } + ); // Sort mode: avg with filter - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true))) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - - assertHitCount(searchResponse, 3); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(searchResponse.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + } + ); } // Issue #9305 @@ -1373,8 +1392,7 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { ) ); - DocWriteResponse indexResponse1 = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse1 = prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("officelocation", "gendale") @@ -1427,8 +1445,7 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { .get(); assertTrue(indexResponse1.getShardInfo().getSuccessful() > 0); - DocWriteResponse indexResponse2 = client().prepareIndex("test") - .setId("2") + DocWriteResponse indexResponse2 = prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("officelocation", "gendale") @@ -1482,27 +1499,30 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { assertTrue(indexResponse2.getShardInfo().getSuccessful() > 0); refresh(); - SearchResponse searchResponse = prepareSearch("test").addSort( - SortBuilders.fieldSort("users.first").setNestedSort(new NestedSortBuilder("users")).order(SortOrder.ASC) - ) - .addSort( - SortBuilders.fieldSort("users.first") - .order(SortOrder.ASC) - .setNestedSort( - new NestedSortBuilder("users").setFilter( - nestedQuery("users.workstations", termQuery("users.workstations.stationid", "s5"), ScoreMode.Avg) - ) - ) + assertNoFailuresAndResponse( + prepareSearch("test").addSort( + SortBuilders.fieldSort("users.first").setNestedSort(new NestedSortBuilder("users")).order(SortOrder.ASC) ) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 
2); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[1].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("fname1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[1].toString(), equalTo("fname3")); + .addSort( + SortBuilders.fieldSort("users.first") + .order(SortOrder.ASC) + .setNestedSort( + new NestedSortBuilder("users").setFilter( + nestedQuery("users.workstations", termQuery("users.workstations.stationid", "s5"), ScoreMode.Avg) + ) + ) + ), + response -> { + assertNoFailures(response); + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(0).getSortValues()[1].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("fname1")); + assertThat(response.getHits().getAt(1).getSortValues()[1].toString(), equalTo("fname3")); + } + ); } public void testCheckFixedBitSetCache() throws Exception { @@ -1513,8 +1533,8 @@ public void testCheckFixedBitSetCache() throws Exception { } assertAcked(prepareCreate("test").setSettings(settingsBuilder)); - client().prepareIndex("test").setId("0").setSource("field", "value").get(); - client().prepareIndex("test").setId("1").setSource("field", "value").get(); + prepareIndex("test").setId("0").setSource("field", "value").get(); + prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); ensureSearchable("test"); @@ -1533,11 +1553,11 @@ public void testCheckFixedBitSetCache() throws Exception { .endArray() .endObject(); // index simple data - client().prepareIndex("test").setId("2").setSource(builder).get(); - client().prepareIndex("test").setId("3").setSource(builder).get(); - client().prepareIndex("test").setId("4").setSource(builder).get(); - client().prepareIndex("test").setId("5").setSource(builder).get(); - client().prepareIndex("test").setId("6").setSource(builder).get(); + prepareIndex("test").setId("2").setSource(builder).get(); + prepareIndex("test").setId("3").setSource(builder).get(); + prepareIndex("test").setId("4").setSource(builder).get(); + prepareIndex("test").setId("5").setSource(builder).get(); + prepareIndex("test").setId("6").setSource(builder).get(); refresh(); ensureSearchable("test"); @@ -1546,11 +1566,10 @@ public void testCheckFixedBitSetCache() throws Exception { assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0L)); // only when querying with nested the fixed bitsets are loaded - SearchResponse searchResponse = prepareSearch("test").setQuery( - nestedQuery("array1", termQuery("array1.field1", "value1"), ScoreMode.Avg) - ).get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(5L)); + assertHitCountAndNoFailures( + prepareSearch("test").setQuery(nestedQuery("array1", termQuery("array1.field1", "value1"), ScoreMode.Avg)), + 5L + ); } clusterStatsResponse = clusterAdmin().prepareClusterStats().get(); assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java
index 9219641f1d3bf..3dd9e68cf08af 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.search.nested;
 
 import org.elasticsearch.action.get.GetResponse;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.search.vectors.KnnSearchBuilder;
@@ -18,6 +17,7 @@
 import java.util.List;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -47,8 +47,7 @@ public void testSimpleNested() throws Exception {
         );
         ensureGreen();
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .startArray("nested")
@@ -63,13 +62,14 @@ public void testSimpleNested() throws Exception {
         waitForRelocation(ClusterHealthStatus.GREEN);
         GetResponse getResponse = client().prepareGet("test", "1").get();
         assertThat(getResponse.isExists(), equalTo(true));
-        assertThat(getResponse.getSourceAsBytes(), notNullValue());
+        assertThat(getResponse.getSourceAsBytesRef(), notNullValue());
         refresh();
 
-        SearchResponse searchResponse = prepareSearch("test").setKnnSearch(
-            List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null))
-        ).setAllowPartialSearchResults(false).get();
-        assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
+        assertResponse(
+            prepareSearch("test").setKnnSearch(List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null)))
+                .setAllowPartialSearchResults(false),
+            response -> assertThat(response.getHits().getHits().length, greaterThan(0))
+        );
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
index 526d523bb0638..0acf9be574ffe 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.search.profile.aggregation;
 
 import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
@@ -42,7 +41,7 @@
 import static org.elasticsearch.test.MapMatcher.assertMap;
 import static org.elasticsearch.test.MapMatcher.matchesMap;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -106,14 +105,13 @@ protected void setupSuiteScopeCluster() throws Exception {
 
         for (int i = 0; i < 5; i++) {
             builders.add(
-                client().prepareIndex("idx")
-                    .setSource(
-                        jsonBuilder().startObject()
-                            .field(STRING_FIELD, randomFrom(randomStrings))
-                            .field(NUMBER_FIELD, randomIntBetween(0, 9))
-                            .field(TAG_FIELD, randomBoolean() ? "more" : "less")
-                            .endObject()
-                    )
+                prepareIndex("idx").setSource(
+                    jsonBuilder().startObject()
+                        .field(STRING_FIELD, randomFrom(randomStrings))
+                        .field(NUMBER_FIELD, randomIntBetween(0, 9))
+                        .field(TAG_FIELD, randomBoolean() ? "more" : "less")
+                        .endObject()
+                )
             );
         }
 
@@ -122,110 +120,113 @@ protected void setupSuiteScopeCluster() throws Exception {
     }
 
     public void testSimpleProfile() {
-        SearchResponse response = prepareSearch("idx").setProfile(true)
-            .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L))
-            .get();
-        assertNoFailures(response);
-        Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
-        assertThat(profileResults, notNullValue());
-        assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
-        for (SearchProfileShardResult profileShardResult : profileResults.values()) {
-            assertThat(profileShardResult, notNullValue());
-            AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
-            assertThat(aggProfileResults, notNullValue());
-            List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
-            assertThat(aggProfileResultsList, notNullValue());
-            assertThat(aggProfileResultsList.size(), equalTo(1));
-            ProfileResult histoAggResult = aggProfileResultsList.get(0);
-            assertThat(histoAggResult, notNullValue());
-            assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator"));
-            assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
-            assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0));
-            assertThat(histoAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> breakdown = histoAggResult.getTimeBreakdown();
-            assertThat(breakdown, notNullValue());
-            assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(breakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(breakdown.get(COLLECT), greaterThan(0L));
-            assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L));
-            assertThat(breakdown.get(REDUCE), equalTo(0L));
-            assertMap(
-                histoAggResult.getDebugInfo(),
-                matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0))
-            );
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setProfile(true).addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)),
+            response -> {
+                Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
+                assertThat(profileResults, notNullValue());
+                assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+                for (SearchProfileShardResult profileShardResult : profileResults.values()) {
+                    assertThat(profileShardResult, notNullValue());
+                    AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+                    assertThat(aggProfileResults, notNullValue());
+                    List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+                    assertThat(aggProfileResultsList, notNullValue());
+                    assertThat(aggProfileResultsList.size(), equalTo(1));
+                    ProfileResult histoAggResult = aggProfileResultsList.get(0);
+                    assertThat(histoAggResult, notNullValue());
+                    assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator"));
+                    assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+                    assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0));
+                    assertThat(histoAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> breakdown = histoAggResult.getTimeBreakdown();
+                    assertThat(breakdown, notNullValue());
+                    assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(breakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(breakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L));
+                    assertThat(breakdown.get(REDUCE), equalTo(0L));
+                    assertMap(
+                        histoAggResult.getDebugInfo(),
+                        matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0))
+                    );
+                }
+            }
+        );
     }
 
     public void testMultiLevelProfile() {
-        SearchResponse response = prepareSearch("idx").setProfile(true)
-            .addAggregation(
-                histogram("histo").field(NUMBER_FIELD)
-                    .interval(1L)
-                    .subAggregation(
-                        terms("terms").field(TAG_FIELD)
-                            .order(BucketOrder.aggregation("avg", false))
-                            .subAggregation(avg("avg").field(NUMBER_FIELD))
-                    )
-            )
-            .get();
-        assertNoFailures(response);
-        Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
-        assertThat(profileResults, notNullValue());
-        assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
-        for (SearchProfileShardResult profileShardResult : profileResults.values()) {
-            assertThat(profileShardResult, notNullValue());
-            AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
-            assertThat(aggProfileResults, notNullValue());
-            List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
-            assertThat(aggProfileResultsList, notNullValue());
-            assertThat(aggProfileResultsList.size(), equalTo(1));
-            ProfileResult histoAggResult = aggProfileResultsList.get(0);
-            assertThat(histoAggResult, notNullValue());
-            assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator"));
-            assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
-            assertThat(histoAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
-            assertThat(histoBreakdown, notNullValue());
-            assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(histoBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(
-                histoAggResult.getDebugInfo(),
-                matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0))
-            );
-
-            ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0);
-            assertThat(termsAggResult, notNullValue());
-            assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
-            assertThat(termsAggResult.getLuceneDescription(), equalTo("terms"));
-            assertThat(termsAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown();
-            assertThat(termsBreakdown, notNullValue());
-            assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(termsBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(termsBreakdown.get(REDUCE), equalTo(0L));
-            assertRemapTermsDebugInfo(termsAggResult);
-            assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1));
-
-            ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
-            assertThat(avgAggResult, notNullValue());
-            assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
-            assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
-            assertThat(avgAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
-            assertThat(avgBreakdown, notNullValue());
-            assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setProfile(true)
+                .addAggregation(
+                    histogram("histo").field(NUMBER_FIELD)
+                        .interval(1L)
+                        .subAggregation(
+                            terms("terms").field(TAG_FIELD)
+                                .order(BucketOrder.aggregation("avg", false))
+                                .subAggregation(avg("avg").field(NUMBER_FIELD))
+                        )
+                ),
+            response -> {
+                Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
+                assertThat(profileResults, notNullValue());
+                assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+                for (SearchProfileShardResult profileShardResult : profileResults.values()) {
+                    assertThat(profileShardResult, notNullValue());
+                    AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+                    assertThat(aggProfileResults, notNullValue());
+                    List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+                    assertThat(aggProfileResultsList, notNullValue());
+                    assertThat(aggProfileResultsList.size(), equalTo(1));
+                    ProfileResult histoAggResult = aggProfileResultsList.get(0);
+                    assertThat(histoAggResult, notNullValue());
+                    assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator"));
+                    assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+                    assertThat(histoAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
+                    assertThat(histoBreakdown, notNullValue());
+                    assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(histoBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(
+                        histoAggResult.getDebugInfo(),
+                        matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0))
+                    );
+
+                    ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0);
+                    assertThat(termsAggResult, notNullValue());
+                    assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
+                    assertThat(termsAggResult.getLuceneDescription(), equalTo("terms"));
+                    assertThat(termsAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown();
+                    assertThat(termsBreakdown, notNullValue());
+                    assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(termsBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(termsBreakdown.get(REDUCE), equalTo(0L));
+                    assertRemapTermsDebugInfo(termsAggResult);
+                    assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1));
+
+                    ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
+                    assertThat(avgAggResult, notNullValue());
+                    assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
+                    assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
+                    assertThat(avgAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
+                    assertThat(avgBreakdown, notNullValue());
+                    assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+                }
+            }
+        );
     }
 
     private void assertRemapTermsDebugInfo(ProfileResult termsAggResult, String... deferredAggregators) {
@@ -243,375 +244,386 @@ private void assertRemapTermsDebugInfo(ProfileResult termsAggResult, String... d
     }
 
     public void testMultiLevelProfileBreadthFirst() {
-        SearchResponse response = prepareSearch("idx").setProfile(true)
-            .addAggregation(
-                histogram("histo").field(NUMBER_FIELD)
-                    .interval(1L)
-                    .subAggregation(
-                        terms("terms").collectMode(SubAggCollectionMode.BREADTH_FIRST)
-                            .field(TAG_FIELD)
-                            .subAggregation(avg("avg").field(NUMBER_FIELD))
-                    )
-            )
-            .get();
-        assertNoFailures(response);
-        Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
-        assertThat(profileResults, notNullValue());
-        assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
-        for (SearchProfileShardResult profileShardResult : profileResults.values()) {
-            assertThat(profileShardResult, notNullValue());
-            AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
-            assertThat(aggProfileResults, notNullValue());
-            List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
-            assertThat(aggProfileResultsList, notNullValue());
-            assertThat(aggProfileResultsList.size(), equalTo(1));
-            ProfileResult histoAggResult = aggProfileResultsList.get(0);
-            assertThat(histoAggResult, notNullValue());
-            assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator"));
-            assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
-            assertThat(histoAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
-            assertThat(histoBreakdown, notNullValue());
-            assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(histoBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(
-                histoAggResult.getDebugInfo(),
-                matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0))
-            );
-            assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));
-
-            ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0);
-            assertThat(termsAggResult, notNullValue());
-            assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
-            assertThat(termsAggResult.getLuceneDescription(), equalTo("terms"));
-            assertThat(termsAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown();
-            assertThat(termsBreakdown, notNullValue());
-            assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(termsBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(termsBreakdown.get(REDUCE), equalTo(0L));
-            assertRemapTermsDebugInfo(termsAggResult, "avg");
-            assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1));
-
-            ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
-            assertThat(avgAggResult, notNullValue());
-            assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
-            assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
-            assertThat(avgAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
-            assertThat(avgBreakdown, notNullValue());
-            assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setProfile(true)
+                .addAggregation(
+                    histogram("histo").field(NUMBER_FIELD)
+                        .interval(1L)
+                        .subAggregation(
+                            terms("terms").collectMode(SubAggCollectionMode.BREADTH_FIRST)
+                                .field(TAG_FIELD)
+                                .subAggregation(avg("avg").field(NUMBER_FIELD))
+                        )
+                ),
+            response -> {
+                Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
+                assertThat(profileResults, notNullValue());
+                assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+                for (SearchProfileShardResult profileShardResult : profileResults.values()) {
+                    assertThat(profileShardResult, notNullValue());
+                    AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+                    assertThat(aggProfileResults, notNullValue());
+                    List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+                    assertThat(aggProfileResultsList, notNullValue());
+                    assertThat(aggProfileResultsList.size(), equalTo(1));
+                    ProfileResult histoAggResult = aggProfileResultsList.get(0);
+                    assertThat(histoAggResult, notNullValue());
+                    assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator"));
+                    assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+                    assertThat(histoAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
+                    assertThat(histoBreakdown, notNullValue());
+                    assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(histoBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(
+                        histoAggResult.getDebugInfo(),
+                        matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0))
+                    );
+                    assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));
+
+                    ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0);
+                    assertThat(termsAggResult, notNullValue());
+                    assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
+                    assertThat(termsAggResult.getLuceneDescription(), equalTo("terms"));
+                    assertThat(termsAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown();
+                    assertThat(termsBreakdown, notNullValue());
+                    assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(termsBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(termsBreakdown.get(REDUCE), equalTo(0L));
+                    assertRemapTermsDebugInfo(termsAggResult, "avg");
+                    assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1));
+
+                    ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
+                    assertThat(avgAggResult, notNullValue());
+                    assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
+                    assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
+                    assertThat(avgAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
+                    assertThat(avgBreakdown, notNullValue());
+                    assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+                }
+            }
+        );
     }
 
     public void testDiversifiedAggProfile() {
-        SearchResponse response = prepareSearch("idx").setProfile(true)
-            .addAggregation(
-                diversifiedSampler("diversify").shardSize(10)
-                    .field(STRING_FIELD)
-                    .maxDocsPerValue(2)
-                    .subAggregation(max("max").field(NUMBER_FIELD))
-            )
-            .get();
-        assertNoFailures(response);
-        Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
-        assertThat(profileResults, notNullValue());
-        assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
-        for (SearchProfileShardResult profileShardResult : profileResults.values()) {
-            assertThat(profileShardResult, notNullValue());
-            AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
-            assertThat(aggProfileResults, notNullValue());
-            List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
-            assertThat(aggProfileResultsList, notNullValue());
-            assertThat(aggProfileResultsList.size(), equalTo(1));
-            ProfileResult diversifyAggResult = aggProfileResultsList.get(0);
-            assertThat(diversifyAggResult, notNullValue());
-            assertThat(diversifyAggResult.getQueryName(), equalTo(DiversifiedOrdinalsSamplerAggregator.class.getSimpleName()));
-            assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify"));
-            assertThat(diversifyAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> diversifyBreakdown = diversifyAggResult.getTimeBreakdown();
-            assertThat(diversifyBreakdown, notNullValue());
-            assertThat(diversifyBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(diversifyBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(diversifyBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(diversifyAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)).entry(DEFERRED, List.of("max")));
-
-            ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0);
-            assertThat(maxAggResult, notNullValue());
-            assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
-            assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
-            assertThat(maxAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown();
-            assertThat(maxBreakdown, notNullValue());
-            assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setProfile(true)
+                .addAggregation(
+                    diversifiedSampler("diversify").shardSize(10)
+                        .field(STRING_FIELD)
+                        .maxDocsPerValue(2)
+                        .subAggregation(max("max").field(NUMBER_FIELD))
+                ),
+            response -> {
+                Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
+                assertThat(profileResults, notNullValue());
+                assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+                for (SearchProfileShardResult profileShardResult : profileResults.values()) {
+                    assertThat(profileShardResult, notNullValue());
+                    AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+                    assertThat(aggProfileResults, notNullValue());
+                    List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+                    assertThat(aggProfileResultsList, notNullValue());
+                    assertThat(aggProfileResultsList.size(), equalTo(1));
+                    ProfileResult diversifyAggResult = aggProfileResultsList.get(0);
+                    assertThat(diversifyAggResult, notNullValue());
+                    assertThat(diversifyAggResult.getQueryName(), equalTo(DiversifiedOrdinalsSamplerAggregator.class.getSimpleName()));
+                    assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify"));
+                    assertThat(diversifyAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> diversifyBreakdown = diversifyAggResult.getTimeBreakdown();
+                    assertThat(diversifyBreakdown, notNullValue());
+                    assertThat(diversifyBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(diversifyBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(diversifyBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(
+                        diversifyAggResult.getDebugInfo(),
+                        matchesMap().entry(BUILT_BUCKETS, greaterThan(0)).entry(DEFERRED, List.of("max"))
+                    );
+
+                    ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0);
+                    assertThat(maxAggResult, notNullValue());
+                    assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
+                    assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
+                    assertThat(maxAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown();
+                    assertThat(maxBreakdown, notNullValue());
+                    assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(diversifyBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
+                }
+            }
+        );
    }
 
     public void testComplexProfile() {
-        SearchResponse response = prepareSearch("idx").setProfile(true)
-            .addAggregation(
-                histogram("histo").field(NUMBER_FIELD)
-                    .interval(1L)
-                    .subAggregation(
-                        terms("tags").field(TAG_FIELD)
-                            .subAggregation(avg("avg").field(NUMBER_FIELD))
-                            .subAggregation(max("max").field(NUMBER_FIELD))
-                    )
-                    .subAggregation(
-                        terms("strings").field(STRING_FIELD)
-                            .subAggregation(avg("avg").field(NUMBER_FIELD))
-                            .subAggregation(max("max").field(NUMBER_FIELD))
-                            .subAggregation(
-                                terms("tags").field(TAG_FIELD)
-                                    .subAggregation(avg("avg").field(NUMBER_FIELD))
-                                    .subAggregation(max("max").field(NUMBER_FIELD))
-                            )
-                    )
-            )
-            .get();
-        assertNoFailures(response);
-        Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
-        assertThat(profileResults, notNullValue());
-        assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
-        for (SearchProfileShardResult profileShardResult : profileResults.values()) {
-            assertThat(profileShardResult, notNullValue());
-            AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
-            assertThat(aggProfileResults, notNullValue());
-            List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
-            assertThat(aggProfileResultsList, notNullValue());
-            assertThat(aggProfileResultsList.size(), equalTo(1));
-            ProfileResult histoAggResult = aggProfileResultsList.get(0);
-            assertThat(histoAggResult, notNullValue());
-            assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator"));
-            assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
-            assertThat(histoAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
-            assertThat(histoBreakdown, notNullValue());
-            assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(histoBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(histoBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(histoBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(
-                histoAggResult.getDebugInfo(),
-                matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0))
-            );
-            assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2));
-
-            Map<String, ProfileResult> histoAggResultSubAggregations = histoAggResult.getProfiledChildren()
-                .stream()
-                .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s));
-
-            ProfileResult tagsAggResult = histoAggResultSubAggregations.get("tags");
-            assertThat(tagsAggResult, notNullValue());
-            assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
-            assertThat(tagsAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> tagsBreakdown = tagsAggResult.getTimeBreakdown();
-            assertThat(tagsBreakdown, notNullValue());
-            assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(tagsBreakdown.get(REDUCE), equalTo(0L));
-            assertRemapTermsDebugInfo(tagsAggResult);
-            assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2));
-
-            Map<String, ProfileResult> tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren()
-                .stream()
-                .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s));
-
-            ProfileResult avgAggResult = tagsAggResultSubAggregations.get("avg");
-            assertThat(avgAggResult, notNullValue());
-            assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
-            assertThat(avgAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
-            assertThat(avgBreakdown, notNullValue());
-            assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
-
-            ProfileResult maxAggResult = tagsAggResultSubAggregations.get("max");
-            assertThat(maxAggResult, notNullValue());
-            assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
-            assertThat(maxAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown();
-            assertThat(maxBreakdown, notNullValue());
-            assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(maxBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
-
-            ProfileResult stringsAggResult = histoAggResultSubAggregations.get("strings");
-            assertThat(stringsAggResult, notNullValue());
-            assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
-            assertThat(stringsAggResult.getTime(), greaterThan(0L));
-            Map<String, Long> stringsBreakdown = stringsAggResult.getTimeBreakdown();
-            assertThat(stringsBreakdown, notNullValue());
-            assertThat(stringsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(stringsBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(stringsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(stringsBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(stringsBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(stringsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(stringsBreakdown.get(REDUCE), equalTo(0L));
-            assertRemapTermsDebugInfo(stringsAggResult);
-            assertThat(stringsAggResult.getProfiledChildren().size(), equalTo(3));
-
-            Map<String, ProfileResult> stringsAggResultSubAggregations = stringsAggResult.getProfiledChildren()
-                .stream()
-                .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s));
-
-            avgAggResult = stringsAggResultSubAggregations.get("avg");
-            assertThat(avgAggResult, notNullValue());
-            assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
-            assertThat(avgAggResult.getTime(), greaterThan(0L));
-            avgBreakdown = avgAggResult.getTimeBreakdown();
-            assertThat(avgBreakdown, notNullValue());
-            assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
-
-            maxAggResult = stringsAggResultSubAggregations.get("max");
-            assertThat(maxAggResult, notNullValue());
-            assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
-            assertThat(maxAggResult.getTime(), greaterThan(0L));
-            maxBreakdown = maxAggResult.getTimeBreakdown();
-            assertThat(maxBreakdown, notNullValue());
-            assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(maxBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
-
-            tagsAggResult = stringsAggResultSubAggregations.get("tags");
-            assertThat(tagsAggResult, notNullValue());
-            assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
-            assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags"));
-            assertThat(tagsAggResult.getTime(), greaterThan(0L));
-            tagsBreakdown = tagsAggResult.getTimeBreakdown();
-            assertThat(tagsBreakdown, notNullValue());
-            assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(tagsBreakdown.get(REDUCE), equalTo(0L));
-            assertRemapTermsDebugInfo(tagsAggResult);
-            assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2));
-
-            tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren()
-                .stream()
-                .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s));
-
-            avgAggResult = tagsAggResultSubAggregations.get("avg");
-            assertThat(avgAggResult, notNullValue());
-            assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
-            assertThat(avgAggResult.getTime(), greaterThan(0L));
-            avgBreakdown = avgAggResult.getTimeBreakdown();
-            assertThat(avgBreakdown, notNullValue());
-            assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
-
-            maxAggResult = tagsAggResultSubAggregations.get("max");
-            assertThat(maxAggResult, notNullValue());
-            assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
-            assertThat(maxAggResult.getTime(), greaterThan(0L));
-            maxBreakdown = maxAggResult.getTimeBreakdown();
-            assertThat(maxBreakdown, notNullValue());
-            assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
-            assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L));
-            assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
-            assertThat(maxBreakdown.get(COLLECT), greaterThan(0L));
-            assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
-            assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
-            assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
-            assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
-            assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
-        }
+        assertNoFailuresAndResponse(
+            prepareSearch("idx").setProfile(true)
+                .addAggregation(
+                    histogram("histo").field(NUMBER_FIELD)
+                        .interval(1L)
+                        .subAggregation(
+                            terms("tags").field(TAG_FIELD)
+                                .subAggregation(avg("avg").field(NUMBER_FIELD))
+                                .subAggregation(max("max").field(NUMBER_FIELD))
+                        )
+                        .subAggregation(
+                            terms("strings").field(STRING_FIELD)
+                                .subAggregation(avg("avg").field(NUMBER_FIELD))
+                                .subAggregation(max("max").field(NUMBER_FIELD))
+                                .subAggregation(
+                                    terms("tags").field(TAG_FIELD)
+                                        .subAggregation(avg("avg").field(NUMBER_FIELD))
+                                        .subAggregation(max("max").field(NUMBER_FIELD))
+                                )
+                        )
+                ),
+            response -> {
+                Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
+                assertThat(profileResults, notNullValue());
+                assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+                for (SearchProfileShardResult profileShardResult : profileResults.values()) {
+                    assertThat(profileShardResult, notNullValue());
+                    AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+                    assertThat(aggProfileResults, notNullValue());
+                    List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+                    assertThat(aggProfileResultsList, notNullValue());
+                    assertThat(aggProfileResultsList.size(), equalTo(1));
+                    ProfileResult histoAggResult = aggProfileResultsList.get(0);
+                    assertThat(histoAggResult, notNullValue());
+                    assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator"));
+                    assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+                    assertThat(histoAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
+                    assertThat(histoBreakdown, notNullValue());
+                    assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(histoBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(histoBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(histoBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(histoBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(
+                        histoAggResult.getDebugInfo(),
+                        matchesMap().entry(TOTAL_BUCKETS, greaterThan(0L)).entry(BUILT_BUCKETS, greaterThan(0))
+                    );
+                    assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2));
+
+                    Map<String, ProfileResult> histoAggResultSubAggregations = histoAggResult.getProfiledChildren()
+                        .stream()
+                        .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s));
+
+                    ProfileResult tagsAggResult = histoAggResultSubAggregations.get("tags");
+                    assertThat(tagsAggResult, notNullValue());
+                    assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
+                    assertThat(tagsAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> tagsBreakdown = tagsAggResult.getTimeBreakdown();
+                    assertThat(tagsBreakdown, notNullValue());
+                    assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(REDUCE), equalTo(0L));
+                    assertRemapTermsDebugInfo(tagsAggResult);
+                    assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2));
+
+                    Map<String, ProfileResult> tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren()
+                        .stream()
+                        .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s));
+
+                    ProfileResult avgAggResult = tagsAggResultSubAggregations.get("avg");
+                    assertThat(avgAggResult, notNullValue());
+                    assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
+                    assertThat(avgAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
+                    assertThat(avgBreakdown, notNullValue());
+                    assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+
+                    ProfileResult maxAggResult = tagsAggResultSubAggregations.get("max");
+                    assertThat(maxAggResult, notNullValue());
+                    assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
+                    assertThat(maxAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown();
+                    assertThat(maxBreakdown, notNullValue());
+                    assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(maxBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
+
+                    ProfileResult stringsAggResult = histoAggResultSubAggregations.get("strings");
+                    assertThat(stringsAggResult, notNullValue());
+                    assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
+                    assertThat(stringsAggResult.getTime(), greaterThan(0L));
+                    Map<String, Long> stringsBreakdown = stringsAggResult.getTimeBreakdown();
+                    assertThat(stringsBreakdown, notNullValue());
+                    assertThat(stringsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(stringsBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(stringsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(stringsBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(stringsBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(stringsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(stringsBreakdown.get(REDUCE), equalTo(0L));
+                    assertRemapTermsDebugInfo(stringsAggResult);
+                    assertThat(stringsAggResult.getProfiledChildren().size(), equalTo(3));
+
+                    Map<String, ProfileResult> stringsAggResultSubAggregations = stringsAggResult.getProfiledChildren()
+                        .stream()
+                        .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s));
+
+                    avgAggResult = stringsAggResultSubAggregations.get("avg");
+                    assertThat(avgAggResult, notNullValue());
+                    assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
+                    assertThat(avgAggResult.getTime(), greaterThan(0L));
+                    avgBreakdown = avgAggResult.getTimeBreakdown();
+                    assertThat(avgBreakdown, notNullValue());
+                    assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+
+                    maxAggResult = stringsAggResultSubAggregations.get("max");
+                    assertThat(maxAggResult, notNullValue());
+                    assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
+                    assertThat(maxAggResult.getTime(), greaterThan(0L));
+                    maxBreakdown = maxAggResult.getTimeBreakdown();
+                    assertThat(maxBreakdown, notNullValue());
+                    assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(maxBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
+
+                    tagsAggResult = stringsAggResultSubAggregations.get("tags");
+                    assertThat(tagsAggResult, notNullValue());
+                    assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName()));
+                    assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags"));
+                    assertThat(tagsAggResult.getTime(), greaterThan(0L));
+                    tagsBreakdown = tagsAggResult.getTimeBreakdown();
+                    assertThat(tagsBreakdown, notNullValue());
+                    assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(tagsBreakdown.get(REDUCE), equalTo(0L));
+                    assertRemapTermsDebugInfo(tagsAggResult);
+                    assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2));
+
+                    tagsAggResultSubAggregations = tagsAggResult.getProfiledChildren()
+                        .stream()
+                        .collect(Collectors.toMap(ProfileResult::getLuceneDescription, s -> s));
+
+                    avgAggResult = tagsAggResultSubAggregations.get("avg");
+                    assertThat(avgAggResult, notNullValue());
+                    assertThat(avgAggResult.getQueryName(), equalTo("AvgAggregator"));
+                    assertThat(avgAggResult.getTime(), greaterThan(0L));
+                    avgBreakdown = avgAggResult.getTimeBreakdown();
+                    assertThat(avgBreakdown, notNullValue());
+                    assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(avgBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(avgBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(avgBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(avgAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+
+                    maxAggResult = tagsAggResultSubAggregations.get("max");
+                    assertThat(maxAggResult, notNullValue());
+                    assertThat(maxAggResult.getQueryName(), equalTo("MaxAggregator"));
+                    assertThat(maxAggResult.getTime(), greaterThan(0L));
+                    maxBreakdown = maxAggResult.getTimeBreakdown();
+                    assertThat(maxBreakdown, notNullValue());
+                    assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS));
+                    assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L));
+                    assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L));
+                    assertThat(maxBreakdown.get(COLLECT), greaterThan(0L));
+                    assertThat(maxBreakdown.get(POST_COLLECTION), greaterThan(0L));
+                    assertThat(maxBreakdown.get(BUILD_AGGREGATION), greaterThan(0L));
+                    assertThat(maxBreakdown.get(REDUCE), equalTo(0L));
+                    assertMap(maxAggResult.getDebugInfo(), matchesMap().entry(BUILT_BUCKETS, greaterThan(0)));
+                    assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
+                }
+            }
+        );
     }
 
     public void testNoProfile() {
-        SearchResponse response = prepareSearch("idx").setProfile(false)
-            .addAggregation(
-                histogram("histo").field(NUMBER_FIELD)
-                    .interval(1L)
-                    .subAggregation(
-                        terms("tags").field(TAG_FIELD)
-                            .subAggregation(avg("avg").field(NUMBER_FIELD))
-                            .subAggregation(max("max").field(NUMBER_FIELD))
-                    )
-                    .subAggregation(
-                        terms("strings").field(STRING_FIELD)
-                            .subAggregation(avg("avg").field(NUMBER_FIELD))
-                            .subAggregation(max("max").field(NUMBER_FIELD))
-                            .subAggregation(
-                                terms("tags").field(TAG_FIELD)
-                                    .subAggregation(avg("avg").field(NUMBER_FIELD))
-                                    .subAggregation(max("max").field(NUMBER_FIELD))
-                            )
-                    )
-            )
-            .get();
-        assertNoFailures(response);
-        Map<String, SearchProfileShardResult> profileResults = response.getProfileResults();
assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(0)); + assertNoFailuresAndResponse( + prepareSearch("idx").setProfile(false) + .addAggregation( + histogram("histo").field(NUMBER_FIELD) + .interval(1L) + .subAggregation( + terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + ) + .subAggregation( + terms("strings").field(STRING_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + .subAggregation( + terms("tags").field(TAG_FIELD) + .subAggregation(avg("avg").field(NUMBER_FIELD)) + .subAggregation(max("max").field(NUMBER_FIELD)) + ) + ) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(0)); + } + ); } /** @@ -630,66 +642,70 @@ public void testFilterByFilter() throws InterruptedException, IOException { List builders = new ArrayList<>(); for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2; i++) { String date = Instant.ofEpochSecond(i).toString(); - builders.add(client().prepareIndex("dateidx").setSource(jsonBuilder().startObject().field("date", date).endObject())); + builders.add(prepareIndex("dateidx").setSource(jsonBuilder().startObject().field("date", date).endObject())); } indexRandom(true, false, builders); - SearchResponse response = prepareSearch("dateidx").setProfile(true) - .addAggregation( - new DateHistogramAggregationBuilder("histo").field("date") - .calendarInterval(DateHistogramInterval.MONTH) - // Add a sub-agg so we don't get to use metadata. That's great and all, but it outputs less debugging info for us to - // verify. - .subAggregation(new MaxAggregationBuilder("m").field("date")) - ) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("dateidx").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map breakdown = histoAggResult.getTimeBreakdown(); - assertThat(breakdown, notNullValue()); - assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); - assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); - assertThat(breakdown.get(COLLECT), equalTo(0L)); - assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); - assertThat(breakdown.get(REDUCE), equalTo(0L)); - assertMap( - histoAggResult.getDebugInfo(), - matchesMap().entry(BUILT_BUCKETS, greaterThan(0)) - .entry("delegate", "RangeAggregator.FromFilters") - .entry( - "delegate_debug", - matchesMap().entry("average_docs_per_range", 
equalTo(RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2)) - .entry("ranges", 1) - .entry("delegate", "FilterByFilterAggregator") + assertNoFailuresAndResponse( + prepareSearch("dateidx").setProfile(true) + .addAggregation( + new DateHistogramAggregationBuilder("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + // Add a sub-agg so we don't get to use metadata. That's great and all, but it outputs less debugging info for us to + // verify. + .subAggregation(new MaxAggregationBuilder("m").field("date")) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("dateidx").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map breakdown = histoAggResult.getTimeBreakdown(); + assertThat(breakdown, notNullValue()); + assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(breakdown.get(COLLECT), equalTo(0L)); + assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); + assertThat(breakdown.get(REDUCE), equalTo(0L)); + assertMap( + histoAggResult.getDebugInfo(), + matchesMap().entry(BUILT_BUCKETS, greaterThan(0)) + .entry("delegate", "RangeAggregator.FromFilters") .entry( "delegate_debug", - matchesMap().entry("segments_with_deleted_docs", greaterThanOrEqualTo(0)) - .entry("segments_with_doc_count_field", 0) - .entry("segments_counted", 0) - .entry("segments_collected", greaterThan(0)) + matchesMap().entry("average_docs_per_range", equalTo(RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2)) + .entry("ranges", 1) + .entry("delegate", "FilterByFilterAggregator") .entry( - "filters", - matchesList().item(matchesMap().entry("query", "*:*").entry("segments_counted_in_constant_time", 0)) + "delegate_debug", + matchesMap().entry("segments_with_deleted_docs", greaterThanOrEqualTo(0)) + .entry("segments_with_doc_count_field", 0) + .entry("segments_counted", 0) + .entry("segments_collected", greaterThan(0)) + .entry( + "filters", + matchesList().item( + matchesMap().entry("query", "*:*").entry("segments_counted_in_constant_time", 0) + ) + ) ) ) - ) - ); - } + ); + } + } + ); } public void testDateHistogramFilterByFilterDisabled() throws InterruptedException, IOException { @@ -704,62 +720,65 @@ public void testDateHistogramFilterByFilterDisabled() throws InterruptedExceptio for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2; i++) { String date = Instant.ofEpochSecond(i).toString(); builders.add( - client().prepareIndex("date_filter_by_filter_disabled") - .setSource(jsonBuilder().startObject().field("date", date).endObject()) + 
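Alongside the assertion changes, every client().prepareIndex("...") call becomes prepareIndex("..."). Presumably this is a convenience on the test base class that does nothing more than the sketch below (assumed shape, named after the call sites in this diff):

    // Assumed ESIntegTestCase-style convenience wrapper; the refactor only
    // shortens the call sites, it does not change what gets indexed.
    protected IndexRequestBuilder prepareIndex(String index) {
        return client().prepareIndex(index);
    }
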
prepareIndex("date_filter_by_filter_disabled").setSource(jsonBuilder().startObject().field("date", date).endObject()) ); } indexRandom(true, false, builders); - SearchResponse response = prepareSearch("date_filter_by_filter_disabled").setProfile(true) - .addAggregation(new DateHistogramAggregationBuilder("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) - .get(); - assertNoFailures(response); - Map profileResults = response.getProfileResults(); - assertThat(profileResults, notNullValue()); - assertThat(profileResults.size(), equalTo(getNumShards("date_filter_by_filter_disabled").numPrimaries)); - for (SearchProfileShardResult profileShardResult : profileResults.values()) { - assertThat(profileShardResult, notNullValue()); - AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); - assertThat(aggProfileResults, notNullValue()); - List aggProfileResultsList = aggProfileResults.getProfileResults(); - assertThat(aggProfileResultsList, notNullValue()); - assertThat(aggProfileResultsList.size(), equalTo(1)); - ProfileResult histoAggResult = aggProfileResultsList.get(0); - assertThat(histoAggResult, notNullValue()); - assertThat(histoAggResult.getQueryName(), equalTo("DateHistogramAggregator.FromDateRange")); - assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); - assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); - assertThat(histoAggResult.getTime(), greaterThan(0L)); - Map breakdown = histoAggResult.getTimeBreakdown(); - assertMap( - breakdown, - matchesMap().entry(INITIALIZE, greaterThan(0L)) - .entry(INITIALIZE + "_count", greaterThan(0L)) - .entry(BUILD_LEAF_COLLECTOR, greaterThan(0L)) - .entry(BUILD_LEAF_COLLECTOR + "_count", greaterThan(0L)) - .entry(COLLECT, greaterThan(0L)) - .entry(COLLECT + "_count", greaterThan(0L)) - .entry(POST_COLLECTION, greaterThan(0L)) - .entry(POST_COLLECTION + "_count", 1L) - .entry(BUILD_AGGREGATION, greaterThan(0L)) - .entry(BUILD_AGGREGATION + "_count", greaterThan(0L)) - .entry(REDUCE, 0L) - .entry(REDUCE + "_count", 0L) - ); - Map debug = histoAggResult.getDebugInfo(); - assertMap( - debug, - matchesMap().entry("delegate", "RangeAggregator.NoOverlap") - .entry("built_buckets", 1) - .entry( - "delegate_debug", - matchesMap().entry("ranges", 1) - .entry("average_docs_per_range", 10000.0) - .entry("singletons", greaterThan(0)) - .entry("non-singletons", 0) - ) - ); - } + assertNoFailuresAndResponse( + prepareSearch("date_filter_by_filter_disabled").setProfile(true) + .addAggregation( + new DateHistogramAggregationBuilder("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) + ), + response -> { + Map profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("date_filter_by_filter_disabled").numPrimaries)); + for (SearchProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(1)); + ProfileResult histoAggResult = aggProfileResultsList.get(0); + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), 
equalTo("DateHistogramAggregator.FromDateRange")); + assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map breakdown = histoAggResult.getTimeBreakdown(); + assertMap( + breakdown, + matchesMap().entry(INITIALIZE, greaterThan(0L)) + .entry(INITIALIZE + "_count", greaterThan(0L)) + .entry(BUILD_LEAF_COLLECTOR, greaterThan(0L)) + .entry(BUILD_LEAF_COLLECTOR + "_count", greaterThan(0L)) + .entry(COLLECT, greaterThan(0L)) + .entry(COLLECT + "_count", greaterThan(0L)) + .entry(POST_COLLECTION, greaterThan(0L)) + .entry(POST_COLLECTION + "_count", 1L) + .entry(BUILD_AGGREGATION, greaterThan(0L)) + .entry(BUILD_AGGREGATION + "_count", greaterThan(0L)) + .entry(REDUCE, 0L) + .entry(REDUCE + "_count", 0L) + ); + Map debug = histoAggResult.getDebugInfo(); + assertMap( + debug, + matchesMap().entry("delegate", "RangeAggregator.NoOverlap") + .entry("built_buckets", 1) + .entry( + "delegate_debug", + matchesMap().entry("ranges", 1) + .entry("average_docs_per_range", 10000.0) + .entry("singletons", greaterThan(0)) + .entry("non-singletons", 0) + ) + ); + } + } + ); } finally { updateClusterSettings(Settings.builder().putNull(SearchService.ENABLE_REWRITE_AGGS_TO_FILTER_BY_FILTER.getKey())); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java index f7b2b0f4443d3..c6d3a6733d2fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.tests.util.English; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.profile.ProfileResult; @@ -28,6 +27,7 @@ import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -50,8 +50,7 @@ public void testProfileDfs() throws Exception { int numDocs = randomIntBetween(10, 50); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex(indexName) - .setId(String.valueOf(i)) + docs[i] = prepareIndex(indexName).setId(String.valueOf(i)) .setSource( textField, English.intToEnglish(i), @@ -67,53 +66,55 @@ public void testProfileDfs() throws Exception { for (int i = 0; i < iters; i++) { QueryBuilder q = randomQueryBuilder(List.of(textField), List.of(numericField), numDocs, 3); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q) - .setTrackTotalHits(true) - .setProfile(true) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setKnnSearch( - randomList( - 2, - 5, - () -> new KnnSearchBuilder( - vectorField, - new float[] { randomFloat(), randomFloat(), randomFloat() }, - randomIntBetween(5, 10), - 50, - randomBoolean() ? 
null : randomFloat() + assertResponse( + prepareSearch().setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setKnnSearch( + randomList( + 2, + 5, + () -> new KnnSearchBuilder( + vectorField, + new float[] { randomFloat(), randomFloat(), randomFloat() }, + randomIntBetween(5, 10), + 50, + randomBoolean() ? null : randomFloat() + ) ) - ) - ) - .get(); - - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shard : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); - } - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); - } - SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = shard.getValue().getSearchProfileDfsPhaseResult(); - assertThat(searchProfileDfsPhaseResult, is(notNullValue())); - for (QueryProfileShardResult queryProfileShardResult : searchProfileDfsPhaseResult.getQueryProfileShardResult()) { - for (ProfileResult result : queryProfileShardResult.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); + ), + response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + for (Map.Entry shard : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = shard.getValue().getSearchProfileDfsPhaseResult(); + assertThat(searchProfileDfsPhaseResult, is(notNullValue())); + for (QueryProfileShardResult queryProfileShardResult : searchProfileDfsPhaseResult.getQueryProfileShardResult()) { + for (ProfileResult result : queryProfileShardResult.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + CollectorResult result = queryProfileShardResult.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + ProfileResult statsResult = searchProfileDfsPhaseResult.getDfsShardResult(); + assertThat(statsResult.getQueryName(), equalTo("statistics")); } - CollectorResult result = queryProfileShardResult.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - ProfileResult statsResult = searchProfileDfsPhaseResult.getDfsShardResult(); - 
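The randomized kNN clauses built above all reduce to the same constructor call; spelled out with fixed values for readability (the values are illustrative and the argument labels are inferred from the usage in this test):

    // One kNN clause as used by testProfileDfs, with the randomness removed.
    // vectorField is the test's dense-vector field name.
    KnnSearchBuilder knn = new KnnSearchBuilder(
        vectorField,                        // field holding the dense vector
        new float[] { 0.1f, 0.2f, 0.3f },   // query vector, 3 dims to match the mapping
        10,                                 // k: nearest neighbours to return
        50,                                 // number of candidates examined per shard
        null                                // similarity threshold; null means none
    );
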
assertThat(statsResult.getQueryName(), equalTo("statistics")); - } + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index e7b02faede9b1..e02bed8409bc4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -10,7 +10,7 @@ import org.apache.lucene.tests.util.English; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.MultiSearchResponse.Item; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -30,6 +30,7 @@ import java.util.Set; import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -49,7 +50,7 @@ public void testProfileQuery() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } List stringFields = Arrays.asList("field1"); @@ -62,29 +63,26 @@ public void testProfileQuery() throws Exception { for (int i = 0; i < iters; i++) { QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3); logger.info("Query: {}", q); - - SearchResponse resp = prepareSearch().setQuery(q) - .setTrackTotalHits(true) - .setProfile(true) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .get(); - - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shard : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); + assertResponse( + prepareSearch().setQuery(q).setTrackTotalHits(true).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), + response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + for (Map.Entry shard : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), 
is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } - + ); } } @@ -100,8 +98,7 @@ public void testProfileMatchesRegular() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test") - .setId(String.valueOf(i)) + docs[i] = prepareIndex("test").setId(String.valueOf(i)) .setSource("id", String.valueOf(i), "field1", English.intToEnglish(i), "field2", i); } @@ -126,47 +123,52 @@ public void testProfileMatchesRegular() throws Exception { .setSearchType(SearchType.QUERY_THEN_FETCH) .setRequestCache(false); - MultiSearchResponse.Item[] responses = client().prepareMultiSearch().add(vanilla).add(profile).get().getResponses(); - - SearchResponse vanillaResponse = responses[0].getResponse(); - SearchResponse profileResponse = responses[1].getResponse(); - - assertThat(vanillaResponse.getFailedShards(), equalTo(0)); - assertThat(profileResponse.getFailedShards(), equalTo(0)); - assertThat(vanillaResponse.getSuccessfulShards(), equalTo(profileResponse.getSuccessfulShards())); - - float vanillaMaxScore = vanillaResponse.getHits().getMaxScore(); - float profileMaxScore = profileResponse.getHits().getMaxScore(); - if (Float.isNaN(vanillaMaxScore)) { - assertTrue("Vanilla maxScore is NaN but Profile is not [" + profileMaxScore + "]", Float.isNaN(profileMaxScore)); - } else { - assertEquals( - "Profile maxScore of [" + profileMaxScore + "] is not close to Vanilla maxScore [" + vanillaMaxScore + "]", - vanillaMaxScore, - profileMaxScore, - 0.001 - ); - } + assertResponse(client().prepareMultiSearch().add(vanilla).add(profile), response -> { + Item[] responses = response.getResponses(); - if (vanillaResponse.getHits().getTotalHits().value != profileResponse.getHits().getTotalHits().value) { - Set vanillaSet = new HashSet<>(Arrays.asList(vanillaResponse.getHits().getHits())); - Set profileSet = new HashSet<>(Arrays.asList(profileResponse.getHits().getHits())); - if (vanillaResponse.getHits().getTotalHits().value > profileResponse.getHits().getTotalHits().value) { - vanillaSet.removeAll(profileSet); - fail("Vanilla hits were larger than profile hits. Non-overlapping elements were: " + vanillaSet.toString()); + SearchResponse vanillaResponse = responses[0].getResponse(); + SearchResponse profileResponse = responses[1].getResponse(); + + assertThat(vanillaResponse.getFailedShards(), equalTo(0)); + assertThat(profileResponse.getFailedShards(), equalTo(0)); + assertThat(vanillaResponse.getSuccessfulShards(), equalTo(profileResponse.getSuccessfulShards())); + + float vanillaMaxScore = vanillaResponse.getHits().getMaxScore(); + float profileMaxScore = profileResponse.getHits().getMaxScore(); + if (Float.isNaN(vanillaMaxScore)) { + assertTrue("Vanilla maxScore is NaN but Profile is not [" + profileMaxScore + "]", Float.isNaN(profileMaxScore)); } else { - profileSet.removeAll(vanillaSet); - fail("Profile hits were larger than vanilla hits. 
Non-overlapping elements were: " + profileSet.toString()); + assertEquals( + "Profile maxScore of [" + profileMaxScore + "] is not close to Vanilla maxScore [" + vanillaMaxScore + "]", + vanillaMaxScore, + profileMaxScore, + 0.001 + ); } - } - SearchHit[] vanillaHits = vanillaResponse.getHits().getHits(); - SearchHit[] profileHits = profileResponse.getHits().getHits(); + if (vanillaResponse.getHits().getTotalHits().value != profileResponse.getHits().getTotalHits().value) { + Set vanillaSet = new HashSet<>(Arrays.asList(vanillaResponse.getHits().getHits())); + Set profileSet = new HashSet<>(Arrays.asList(profileResponse.getHits().getHits())); + if (vanillaResponse.getHits().getTotalHits().value > profileResponse.getHits().getTotalHits().value) { + vanillaSet.removeAll(profileSet); + fail("Vanilla hits were larger than profile hits. Non-overlapping elements were: " + vanillaSet.toString()); + } else { + profileSet.removeAll(vanillaSet); + fail("Profile hits were larger than vanilla hits. Non-overlapping elements were: " + profileSet.toString()); + } + } - for (int j = 0; j < vanillaHits.length; j++) { - assertThat("Profile hit #" + j + " has a different ID from Vanilla", vanillaHits[j].getId(), equalTo(profileHits[j].getId())); - } + SearchHit[] vanillaHits = vanillaResponse.getHits().getHits(); + SearchHit[] profileHits = profileResponse.getHits().getHits(); + for (int j = 0; j < vanillaHits.length; j++) { + assertThat( + "Profile hit #" + j + " has a different ID from Vanilla", + vanillaHits[j].getId(), + equalTo(profileHits[j].getId()) + ); + } + }); } /** @@ -177,7 +179,7 @@ public void testSimpleMatch() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -185,26 +187,26 @@ public void testSimpleMatch() throws Exception { QueryBuilder q = QueryBuilders.matchQuery("field1", "one"); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + Map p = response.getProfileResults(); + assertNotNull(p); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - Map p = resp.getProfileResults(); - assertNotNull(p); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertEquals(result.getQueryName(), "TermQuery"); + assertEquals(result.getLuceneDescription(), "field1:one"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertEquals(result.getQueryName(), "TermQuery"); - assertEquals(result.getLuceneDescription(), 
"field1:one"); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } /** @@ -217,7 +219,7 @@ public void testBool() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -226,45 +228,44 @@ public void testBool() throws Exception { .must(QueryBuilders.matchQuery("field1", "one")) .must(QueryBuilders.matchQuery("field1", "two")); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + Map p = response.getProfileResults(); + assertNotNull(p); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - Map p = resp.getProfileResults(); - assertNotNull(p); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertEquals(result.getQueryName(), "BooleanQuery"); + assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + assertEquals(result.getProfiledChildren().size(), 2); + + // Check the children + List children = result.getProfiledChildren(); + assertEquals(children.size(), 2); + + ProfileResult childProfile = children.get(0); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:one"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + assertEquals(childProfile.getProfiledChildren().size(), 0); + + childProfile = children.get(1); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:two"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertEquals(result.getQueryName(), "BooleanQuery"); - assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); - assertEquals(result.getProfiledChildren().size(), 2); - - // Check the children - List children = 
result.getProfiledChildren(); - assertEquals(children.size(), 2); - - ProfileResult childProfile = children.get(0); - assertEquals(childProfile.getQueryName(), "TermQuery"); - assertEquals(childProfile.getLuceneDescription(), "field1:one"); - assertThat(childProfile.getTime(), greaterThan(0L)); - assertNotNull(childProfile.getTimeBreakdown()); - assertEquals(childProfile.getProfiledChildren().size(), 0); - - childProfile = children.get(1); - assertEquals(childProfile.getQueryName(), "TermQuery"); - assertEquals(childProfile.getLuceneDescription(), "field1:two"); - assertThat(childProfile.getTime(), greaterThan(0L)); - assertNotNull(childProfile.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } - + }); } /** @@ -277,7 +278,7 @@ public void testEmptyBool() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -287,25 +288,25 @@ public void testEmptyBool() throws Exception { QueryBuilder q = QueryBuilders.boolQuery(); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } /** @@ -320,7 +321,7 @@ public void testCollapsingBool() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = 
client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -332,25 +333,25 @@ public void testCollapsingBool() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testBoosting() throws Exception { @@ -360,7 +361,7 @@ public void testBoosting() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -372,25 +373,25 @@ public void testBoosting() throws Exception { .negativeBoost(randomFloat()); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult 
searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testDisMaxRange() throws Exception { @@ -400,7 +401,7 @@ public void testDisMaxRange() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -412,25 +413,25 @@ public void testDisMaxRange() throws Exception { .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true)); logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), 
greaterThan(0L)); } - } + }); } public void testRange() throws Exception { @@ -440,7 +441,7 @@ public void testRange() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -451,25 +452,25 @@ public void testRange() throws Exception { logger.info("Query: {}", q.toString()); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH).get(); + assertResponse(prepareSearch().setQuery(q).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + }); } public void testPhrase() throws Exception { @@ -479,8 +480,7 @@ public void testPhrase() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test") - .setId(String.valueOf(i)) + docs[i] = prepareIndex("test").setId(String.valueOf(i)) .setSource("field1", English.intToEnglish(i) + " " + English.intToEnglish(i + 1), "field2", i); } @@ -492,36 +492,35 @@ public void testPhrase() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q) - .setIndices("test") - .setProfile(true) - .setSearchType(SearchType.QUERY_THEN_FETCH) - .get(); + assertResponse( + prepareSearch().setQuery(q).setIndices("test").setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), + response -> { + if (response.getShardFailures().length > 0) { + for (ShardSearchFailure f : response.getShardFailures()) { + logger.error("Shard search failure: {}", f); + } + fail(); + } - if (resp.getShardFailures().length > 0) { - for (ShardSearchFailure 
f : resp.getShardFailures()) { - logger.error("Shard search failure: {}", f); - } - fail(); - } + assertNotNull("Profile response element should not be null", response.getProfileResults()); + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); - assertNotNull("Profile response element should not be null", resp.getProfileResults()); - assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shardResult : response.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { - for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { - for (ProfileResult result : searchProfiles.getQueryResults()) { - assertNotNull(result.getQueryName()); - assertNotNull(result.getLuceneDescription()); - assertThat(result.getTime(), greaterThan(0L)); - assertNotNull(result.getTimeBreakdown()); + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } } - - CollectorResult result = searchProfiles.getCollectorResult(); - assertThat(result.getName(), is(not(emptyOrNullString()))); - assertThat(result.getTime(), greaterThan(0L)); } - } + ); } /** @@ -534,7 +533,7 @@ public void testNoProfile() throws Exception { int numDocs = randomIntBetween(100, 150); IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + docs[i] = prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); } indexRandom(true, docs); @@ -543,8 +542,9 @@ public void testNoProfile() throws Exception { logger.info("Query: {}", q); - SearchResponse resp = prepareSearch().setQuery(q).setProfile(false).get(); - assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); + assertResponse( + prepareSearch().setQuery(q).setProfile(false), + response -> assertThat("Profile response element should be an empty map", response.getProfileResults().size(), equalTo(0)) + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java index 099100a7a67e3..81c612107e44a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ExistsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; @@ -30,7 +29,9 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; public class ExistsIT extends ESIntegTestCase { @@ -94,7 +95,7 @@ public void testExists() throws Exception { emptyMap() }; List reqs = new ArrayList<>(); for (Map source : sources) { - reqs.add(client().prepareIndex("idx").setSource(source)); + reqs.add(prepareIndex("idx").setSource(source)); } // We do NOT index dummy documents, otherwise the type for these dummy documents // would have _field_names indexed while the current type might not which might @@ -113,46 +114,46 @@ public void testExists() throws Exception { expected.put("vec", 2); final long numDocs = sources.length; - SearchResponse allDocs = prepareSearch("idx").setSize(sources.length).get(); - assertNoFailures(allDocs); - assertHitCount(allDocs, numDocs); - for (Map.Entry entry : expected.entrySet()) { - final String fieldName = entry.getKey(); - final int count = entry.getValue(); - // exists - SearchResponse resp = prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)).get(); - assertNoFailures(resp); - try { - assertEquals( - String.format( - Locale.ROOT, - "exists(%s, %d) mapping: %s response: %s", - fieldName, - count, - Strings.toString(mapping), - resp - ), - count, - resp.getHits().getTotalHits().value - ); - } catch (AssertionError e) { - for (SearchHit searchHit : allDocs.getHits()) { - final String index = searchHit.getIndex(); - final String id = searchHit.getId(); - final ExplainResponse explanation = client().prepareExplain(index, id) - .setQuery(QueryBuilders.existsQuery(fieldName)) - .get(); - logger.info( - "Explanation for [{}] / [{}] / [{}]: [{}]", - fieldName, - id, - searchHit.getSourceAsString(), - explanation.getExplanation() - ); - } - throw e; + assertNoFailuresAndResponse(prepareSearch("idx").setSize(sources.length), allDocs -> { + assertHitCount(allDocs, numDocs); + for (Map.Entry entry : expected.entrySet()) { + final String fieldName = entry.getKey(); + final int count = entry.getValue(); + // exists + assertNoFailuresAndResponse(prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)), response -> { + try { + assertEquals( + String.format( + Locale.ROOT, + "exists(%s, %d) mapping: %s response: %s", + fieldName, + count, + Strings.toString(mapping), + response + ), + count, + response.getHits().getTotalHits().value + ); + } catch (AssertionError e) { + for (SearchHit searchHit : allDocs.getHits()) { + final String index = searchHit.getIndex(); + final String id = searchHit.getId(); + final ExplainResponse explanation = client().prepareExplain(index, id) + .setQuery(QueryBuilders.existsQuery(fieldName)) + .get(); + logger.info( + "Explanation for [{}] / [{}] / [{}]: [{}]", + fieldName, + id, + searchHit.getSourceAsString(), + explanation.getExplanation() + ); + } + throw e; + } + }); } - } + }); } public void testFieldAlias() throws Exception { @@ -182,11 +183,11 @@ public void testFieldAlias() throws Exception { ensureGreen("idx"); List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("idx").setSource(emptyMap())); - indexRequests.add(client().prepareIndex("idx").setSource(emptyMap())); - indexRequests.add(client().prepareIndex("idx").setSource("bar", 3)); - 
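The try/catch around the count assertion above is purely diagnostic: when a count is off, the test explains every indexed document against the failing exists query before rethrowing, so the log shows which documents matched and why. The core check it guards is just this (a sketch with a concrete field name and count; the test drives both from its expectations map):

    // Essential assertion of testExists for a single field; "foo" and the
    // expected count of 2 are illustrative values only.
    assertNoFailuresAndResponse(
        prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo")),
        response -> assertEquals(2, response.getHits().getTotalHits().value)
    );
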
indexRequests.add(client().prepareIndex("idx").setSource("foo", singletonMap("bar", 2.718))); - indexRequests.add(client().prepareIndex("idx").setSource("foo", singletonMap("bar", 6.283))); + indexRequests.add(prepareIndex("idx").setSource(emptyMap())); + indexRequests.add(prepareIndex("idx").setSource(emptyMap())); + indexRequests.add(prepareIndex("idx").setSource("bar", 3)); + indexRequests.add(prepareIndex("idx").setSource("foo", singletonMap("bar", 2.718))); + indexRequests.add(prepareIndex("idx").setSource("foo", singletonMap("bar", 6.283))); indexRandom(true, false, indexRequests); Map expected = new LinkedHashMap<>(); @@ -198,10 +199,7 @@ public void testFieldAlias() throws Exception { for (Map.Entry entry : expected.entrySet()) { String fieldName = entry.getKey(); int expectedCount = entry.getValue(); - - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)).get(); - assertNoFailures(response); - assertHitCount(response, expectedCount); + assertHitCountAndNoFailures(prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)), expectedCount); } } @@ -225,14 +223,12 @@ public void testFieldAliasWithNoDocValues() throws Exception { ensureGreen("idx"); List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("idx").setSource(emptyMap())); - indexRequests.add(client().prepareIndex("idx").setSource(emptyMap())); - indexRequests.add(client().prepareIndex("idx").setSource("foo", 3)); - indexRequests.add(client().prepareIndex("idx").setSource("foo", 43)); + indexRequests.add(prepareIndex("idx").setSource(emptyMap())); + indexRequests.add(prepareIndex("idx").setSource(emptyMap())); + indexRequests.add(prepareIndex("idx").setSource("foo", 3)); + indexRequests.add(prepareIndex("idx").setSource("foo", 43)); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo-alias")).get(); - assertNoFailures(response); - assertHitCount(response, 2); + assertHitCountAndNoFailures(prepareSearch("idx").setQuery(QueryBuilders.existsQuery("foo-alias")), 2L); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java index 1e18c0ca3c59c..50a1924843e74 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.query.IntervalQueryBuilder; @@ -30,6 +29,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; public class IntervalQueriesIT extends ESIntegTestCase { @@ -51,15 +51,16 @@ public void testEmptyIntervalsWithNestedMappings() throws InterruptedException { indexRandom( true, - client().prepareIndex("nested").setId("1").setSource("text", "the quick brown fox jumps"), - client().prepareIndex("nested").setId("2").setSource("text", "quick brown"), - 
client().prepareIndex("nested").setId("3").setSource("text", "quick") + prepareIndex("nested").setId("1").setSource("text", "the quick brown fox jumps"), + prepareIndex("nested").setId("2").setSource("text", "quick brown"), + prepareIndex("nested").setId("3").setSource("text", "quick") ); - SearchResponse resp = prepareSearch("nested").setQuery( - new IntervalQueryBuilder("empty_text", new IntervalsSourceProvider.Match("an empty query", 0, true, null, null, null)) - ).get(); - assertEquals(0, resp.getFailedShards()); + assertNoFailures( + prepareSearch("nested").setQuery( + new IntervalQueryBuilder("empty_text", new IntervalsSourceProvider.Match("an empty query", 0, true, null, null, null)) + ) + ); } private static class EmptyAnalyzer extends Analyzer { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index f251ab5cb6269..2d77e170abdc5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; @@ -53,6 +54,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -88,8 +91,7 @@ public void init() throws Exception { int numDocs = scaledRandomIntBetween(50, 100); List builders = new ArrayList<>(); builders.add( - client().prepareIndex("test") - .setId("theone") + prepareIndex("test").setId("theone") .setSource( "id", "theone", @@ -108,8 +110,7 @@ public void init() throws Exception { ) ); builders.add( - client().prepareIndex("test") - .setId("theother") + prepareIndex("test").setId("theother") .setSource( "id", "theother", @@ -127,8 +128,7 @@ public void init() throws Exception { ); builders.add( - client().prepareIndex("test") - .setId("ultimate1") + prepareIndex("test").setId("ultimate1") .setSource( "id", "ultimate1", @@ -145,8 +145,7 @@ public void init() throws Exception { ) ); builders.add( - client().prepareIndex("test") - .setId("ultimate2") + prepareIndex("test").setId("ultimate2") .setSource( "full_name", "Man the Ultimate Ninja", @@ -162,8 +161,7 @@ public void init() throws Exception { ); builders.add( - client().prepareIndex("test") - .setId("anotherhero") + prepareIndex("test").setId("anotherhero") .setSource( "id", "anotherhero", @@ -181,8 +179,7 @@ public void init() throws Exception { ); builders.add( - client().prepareIndex("test") - .setId("nowHero") + 
prepareIndex("test").setId("nowHero") .setSource( "id", "nowHero", @@ -209,8 +206,7 @@ public void init() throws Exception { String first = RandomPicks.randomFrom(random(), firstNames); String last = randomPickExcept(lastNames, first); builders.add( - client().prepareIndex("test") - .setId("" + i) + prepareIndex("test").setId("" + i) .setSource( "id", i, @@ -267,72 +263,91 @@ private XContentBuilder createMapping() throws IOException { public void testDefaults() throws ExecutionException, InterruptedException { MatchQueryParser.Type type = MatchQueryParser.Type.BOOLEAN; - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) - ) - ).get(); - Set topNIds = Sets.newHashSet("theone", "theother"); - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - topNIds.remove(searchResponse.getHits().getAt(i).getId()); - // very likely that we hit a random doc that has the same score so orders are random since - // the doc id is the tie-breaker - } - assertThat(topNIds, empty()); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) - .type(type) - ) - ).get(); - assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother"))); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").operator(Operator.OR).type(type) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) + ) + ), + response -> { + Set topNIds = Sets.newHashSet("theone", "theother"); + for (int i = 0; i < response.getHits().getHits().length; i++) { + topNIds.remove(response.getHits().getAt(i).getId()); + // very likely that we hit a random doc that has the same score so orders are random since + // the doc id is the tie-breaker + } + assertThat(topNIds, empty()); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").operator(Operator.OR) + .type(type) + ) + ), + response -> { + assertFirstHit(response, anyOf(hasId("theone"), hasId("theother"))); + 
assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").operator(Operator.OR).type(type) + ) + ), + response -> assertFirstHit(response, hasId("theother")) + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); } public void testPhraseType() { - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase") - .operator(Operator.OR) - .type(MatchQueryParser.Type.PHRASE) - ) - ).get(); - assertFirstHit(searchResponse, hasId("ultimate2")); - assertHitCount(searchResponse, 1L); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase").operator( - Operator.OR - ).type(MatchQueryParser.Type.PHRASE) - ) - ).get(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(1L)); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase") + .operator(Operator.OR) + .type(MatchQueryParser.Type.PHRASE) + ) + ), + response -> { + assertFirstHit(response, hasId("ultimate2")); + assertHitCount(response, 1L); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase").operator( + Operator.OR + ).type(MatchQueryParser.Type.PHRASE) + ) + ), + response -> assertThat(response.getHits().getTotalHits().value, greaterThan(1L)) + ); assertSearchHitsWithoutFailures( prepareSearch("test").setQuery( @@ -348,14 +363,15 @@ public void testPhraseType() { } public void testSingleField() throws NoSuchFieldException, IllegalAccessException { - SearchResponse searchResponse = prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill"))).get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasId("theone")); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill"))), + response -> assertFirstHit(response, hasId("theone")) + ); - searchResponse = prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill", "int-field")).analyzer("category")) - .get(); - assertNoFailures(searchResponse); - assertFirstHit(searchResponse, hasId("theone")); + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill", "int-field")).analyzer("category")), + response -> assertFirstHit(response, hasId("theone")) + ); String[] fields = { "full_name", @@ -393,34 +409,39 @@ public void testSingleField() throws 
NoSuchFieldException, IllegalAccessExceptio builder.append(RandomPicks.randomFrom(random(), query)).append(" "); } MultiMatchQueryBuilder multiMatchQueryBuilder = randomizeType(multiMatchQuery(builder.toString(), field)); - SearchResponse multiMatchResp = prepareSearch("test") - // id sort field is a tie, in case hits have the same score, - // the hits will be sorted the same consistently - .addSort("_score", SortOrder.DESC) - .addSort("id", SortOrder.ASC) - .setQuery(multiMatchQueryBuilder) - .get(); - MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery(field, builder.toString()); - - SearchResponse matchResp = prepareSearch("test") - // id tie sort - .addSort("_score", SortOrder.DESC) - .addSort("id", SortOrder.ASC) - .setQuery(matchQueryBuilder) - .get(); - assertThat( - "field: " + field + " query: " + builder.toString(), - multiMatchResp.getHits().getTotalHits().value, - equalTo(matchResp.getHits().getTotalHits().value) + assertResponse( + prepareSearch("test") + // id sort field is a tie, in case hits have the same score, + // the hits will be sorted the same consistently + .addSort("_score", SortOrder.DESC) + .addSort("id", SortOrder.ASC) + .setQuery(multiMatchQueryBuilder), + multiMatchResp -> { + MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery(field, builder.toString()); + assertResponse( + prepareSearch("test") + // id tie sort + .addSort("_score", SortOrder.DESC) + .addSort("id", SortOrder.ASC) + .setQuery(matchQueryBuilder), + matchResp -> { + assertThat( + "field: " + field + " query: " + builder.toString(), + multiMatchResp.getHits().getTotalHits().value, + equalTo(matchResp.getHits().getTotalHits().value) + ); + SearchHits hits = multiMatchResp.getHits(); + if (field.startsWith("missing")) { + assertEquals(0, hits.getHits().length); + } + for (int j = 0; j < hits.getHits().length; j++) { + assertThat(hits.getHits()[j].getScore(), equalTo(matchResp.getHits().getHits()[j].getScore())); + assertThat(hits.getHits()[j].getId(), equalTo(matchResp.getHits().getHits()[j].getId())); + } + } + ); + } ); - SearchHits hits = multiMatchResp.getHits(); - if (field.startsWith("missing")) { - assertEquals(0, hits.getHits().length); - } - for (int j = 0; j < hits.getHits().length; j++) { - assertThat(hits.getHits()[j].getScore(), equalTo(matchResp.getHits().getHits()[j].getScore())); - assertThat(hits.getHits()[j].getId(), equalTo(matchResp.getHits().getHits()[j].getId())); - } } } @@ -435,23 +456,24 @@ public void testEquivalence() { MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") : multiMatchQuery("marvel hero captain america", "*_name", randomBoolean() ? 
"category" : "categ*"); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery(randomizeType(multiMatchQueryBuilder.operator(Operator.OR).type(type))) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - disMaxQuery().add(matchQuery("full_name", "marvel hero captain america")) - .add(matchQuery("first_name", "marvel hero captain america")) - .add(matchQuery("last_name", "marvel hero captain america")) - .add(matchQuery("category", "marvel hero captain america")) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery(randomizeType(multiMatchQueryBuilder.operator(Operator.OR).type(type))), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + disMaxQuery().add(matchQuery("full_name", "marvel hero captain america")) + .add(matchQuery("first_name", "marvel hero captain america")) + .add(matchQuery("last_name", "marvel hero captain america")) + .add(matchQuery("category", "marvel hero captain america")) + ), + right -> assertEquivalent("marvel hero captain america", left, right) ) - .get(); - assertEquivalent("marvel hero captain america", left, right); + ); } { @@ -461,64 +483,68 @@ public void testEquivalence() { MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") : multiMatchQuery("captain america", "*_name", randomBoolean() ? "category" : "categ*"); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - randomizeType(multiMatchQueryBuilder.operator(op).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch).type(type)) - ) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should( - randomBoolean() - ? termQuery("full_name", "captain america") - : matchQuery("full_name", "captain america").operator(op) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + randomizeType( + multiMatchQueryBuilder.operator(op).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch).type(type) ) - .should(matchQuery("first_name", "captain america").operator(op)) - .should(matchQuery("last_name", "captain america").operator(op)) - .should(matchQuery("category", "captain america").operator(op)) + ), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should( + randomBoolean() + ? 
termQuery("full_name", "captain america") + : matchQuery("full_name", "captain america").operator(op) + ) + .should(matchQuery("first_name", "captain america").operator(op)) + .should(matchQuery("last_name", "captain america").operator(op)) + .should(matchQuery("category", "captain america").operator(op)) + ), + right -> assertEquivalent("captain america", left, right) ) - .get(); - assertEquivalent("captain america", left, right); + ); } { String minShouldMatch = randomBoolean() ? null : "" + between(0, 1); - SearchResponse left = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - randomizeType( - multiMatchQuery("capta", "full_name", "first_name", "last_name", "category").type( - MatchQueryParser.Type.PHRASE_PREFIX - ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) - ) - ) - .get(); - - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should(matchPhrasePrefixQuery("full_name", "capta")) - .should(matchPhrasePrefixQuery("first_name", "capta")) - .should(matchPhrasePrefixQuery("last_name", "capta")) - .should(matchPhrasePrefixQuery("category", "capta")) + assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + randomizeType( + multiMatchQuery("capta", "full_name", "first_name", "last_name", "category").type( + MatchQueryParser.Type.PHRASE_PREFIX + ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) + ) + ), + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should(matchPhrasePrefixQuery("full_name", "capta")) + .should(matchPhrasePrefixQuery("first_name", "capta")) + .should(matchPhrasePrefixQuery("last_name", "capta")) + .should(matchPhrasePrefixQuery("category", "capta")) + ), + right -> assertEquivalent("capta", left, right) ) - .get(); - assertEquivalent("capta", left, right); + ); } { String minShouldMatch = randomBoolean() ? 
null : "" + between(0, 1); - SearchResponse left; + SearchRequestBuilder leftSearch; if (randomBoolean()) { - left = prepareSearch("test").setSize(numDocs) + leftSearch = prepareSearch("test").setSize(numDocs) .addSort(SortBuilders.scoreSort()) .addSort(SortBuilders.fieldSort("id")) .setQuery( @@ -527,10 +553,9 @@ public void testEquivalence() { MatchQueryParser.Type.PHRASE ).minimumShouldMatch(minShouldMatch) ) - ) - .get(); + ); } else { - left = prepareSearch("test").setSize(numDocs) + leftSearch = prepareSearch("test").setSize(numDocs) .addSort(SortBuilders.scoreSort()) .addSort(SortBuilders.fieldSort("id")) .setQuery( @@ -539,163 +564,206 @@ public void testEquivalence() { MatchQueryParser.Type.PHRASE ).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch) ) - ) - .get(); + ); } - SearchResponse right = prepareSearch("test").setSize(numDocs) - .addSort(SortBuilders.scoreSort()) - .addSort(SortBuilders.fieldSort("id")) - .setQuery( - boolQuery().minimumShouldMatch(minShouldMatch) - .should(matchPhraseQuery("full_name", "captain america")) - .should(matchPhraseQuery("first_name", "captain america")) - .should(matchPhraseQuery("last_name", "captain america")) - .should(matchPhraseQuery("category", "captain america")) + assertResponse( + leftSearch, + left -> assertResponse( + prepareSearch("test").setSize(numDocs) + .addSort(SortBuilders.scoreSort()) + .addSort(SortBuilders.fieldSort("id")) + .setQuery( + boolQuery().minimumShouldMatch(minShouldMatch) + .should(matchPhraseQuery("full_name", "captain america")) + .should(matchPhraseQuery("first_name", "captain america")) + .should(matchPhraseQuery("last_name", "captain america")) + .should(matchPhraseQuery("category", "captain america")) + ), + right -> assertEquivalent("captain america", left, right) ) - .get(); - assertEquivalent("captain america", left, right); + ); } } } public void testCrossFieldMode() throws ExecutionException, InterruptedException { - SearchResponse searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - assertSecondHit(searchResponse, hasId("theone")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theother")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill").type( - 
MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").lenient(true).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america 15", "first_name", "last_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .lenient(true) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("25 15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "first_name", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("25 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .analyzer("category") - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").operator(Operator.OR) - ) - ).get(); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero captain 
america", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> { + assertFirstHit(response, hasId("theother")); + assertSecondHit(response, hasId("theone")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theother")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").lenient(true).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america 15", "first_name", "last_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .lenient(true) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("25 15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "first_name", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + 
assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("25 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .analyzer("category") + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); + + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").operator(Operator.OR) + ) + ), + response -> assertFirstHit(response, hasId("theone")) + ); // test group based on analyzer -- all fields are grouped into a cross field search - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).analyzer("category").operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).analyzer("category").operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); // counter example assertHitCount( prepareSearch("test").setQuery( @@ -721,83 +789,112 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ); // test if boosts work - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("the ultimate", "full_name", "first_name", "category").field("last_name", 10) - .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("ultimate1")); // has ultimate in the last_name and that is boosted - assertSecondHit(searchResponse, hasId("ultimate2")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("the ultimate", "full_name", "first_name", "category").field("last_name", 10) + .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("ultimate1")); // has ultimate in the last_name and that is boosted + assertSecondHit(response, hasId("ultimate2")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); // since we try to treat the matching fields as one field scores are very similar but we have a small bias towards the // more frequent field that acts as a tie-breaker internally - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").type( - MultiMatchQueryBuilder.Type.CROSS_FIELDS - ).operator(Operator.AND) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("ultimate2")); - 
assertSecondHit(searchResponse, hasId("ultimate1")); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").type( + MultiMatchQueryBuilder.Type.CROSS_FIELDS + ).operator(Operator.AND) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("ultimate2")); + assertSecondHit(response, hasId("ultimate1")); + assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore())); + } + ); // Test group based on numeric fields - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - + assertResponse( + prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); // Two numeric fields together caused trouble at one point! - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("alpha 15", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true)) - ).get(); - /* - * Doesn't find the one because "alpha 15" isn't a number and we don't - * break on spaces. 
- */ - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("ultimate1")); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("alpha 15", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true) + ) + ), + response -> { + /* + * Doesn't find the one because "alpha 15" isn't a number and we don't + * break on spaces. + */ + assertHitCount(response, 1L); + assertFirstHit(response, hasId("ultimate1")); + } + ); // Lenient wasn't always properly lenient with two numeric fields - searchResponse = prepareSearch("test").setQuery( - randomizeType( - multiMatchQuery("alpha 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true) - ) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("ultimate1")); - + assertResponse( + prepareSearch("test").setQuery( + randomizeType( + multiMatchQuery("alpha 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) + .lenient(true) + ) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("ultimate1")); + } + ); // Check that cross fields works with date fields - searchResponse = prepareSearch("test").setQuery( - randomizeType(multiMatchQuery("now", "f*", "date").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true) - ).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("nowHero")); + assertResponse( + prepareSearch("test").setQuery( + randomizeType(multiMatchQuery("now", "f*", "date").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true) + ), + response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("nowHero")); + } + ); } /** @@ -815,18 +912,25 @@ public void testFuzzyFieldLevelBoosting() throws InterruptedException, Execution assertAcked(builder.setMapping("title", "type=text", "body", "type=text")); ensureGreen(); List<IndexRequestBuilder> builders = new ArrayList<>(); - builders.add(client().prepareIndex(idx).setId("1").setSource("title", "foo", "body", "bar")); - builders.add(client().prepareIndex(idx).setId("2").setSource("title", "bar", "body", "foo")); + builders.add(prepareIndex(idx).setId("1").setSource("title", "foo", "body", "bar")); + builders.add(prepareIndex(idx).setId("2").setSource("title", "bar", "body", "foo")); indexRandom(true, false, builders); - SearchResponse searchResponse = prepareSearch(idx).setExplain(true) - .setQuery(multiMatchQuery("foo").field("title", 100).field("body").fuzziness(Fuzziness.ZERO)) - .get(); - SearchHit[] hits = searchResponse.getHits().getHits(); - assertNotEquals("both documents should be on different shards", hits[0].getShard().getShardId(), hits[1].getShard().getShardId()); - assertEquals("1", hits[0].getId()); - assertEquals("2", hits[1].getId()); - assertThat(hits[0].getScore(), greaterThan(hits[1].getScore())); + assertResponse( + prepareSearch(idx).setExplain(true) + 
.setQuery(multiMatchQuery("foo").field("title", 100).field("body").fuzziness(Fuzziness.ZERO)), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertNotEquals( + "both documents should be on different shards", + hits[0].getShard().getShardId(), + hits[1].getShard().getShardId() + ); + assertEquals("1", hits[0].getId()); + assertEquals("2", hits[1].getId()); + assertThat(hits[0].getScore(), greaterThan(hits[1].getScore())); + } + ); } private static void assertEquivalent(String query, SearchResponse left, SearchResponse right) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java index 882e18eb593aa..d8787b6ef7b16 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.Operator; import org.elasticsearch.search.SearchHit; @@ -28,7 +27,8 @@ import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -44,106 +44,93 @@ public void setup() throws Exception { public void testBasicAllQuery() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar baz")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f2", "Bar")); - reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo bar baz")); + reqs.add(prepareIndex("test").setId("2").setSource("f2", "Bar")); + reqs.add(prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(queryStringQuery("Bar")).get(); - assertHitCount(resp, 3L); - assertHits(resp.getHits(), "1", "2", "3"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Bar")), response -> { + assertHitCount(response, 3L); + assertHits(response.getHits(), "1", 
"2", "3"); + }); } public void testWithDate() throws Exception { List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testWithLotsOfTypes() throws Exception { List reqs = new ArrayList<>(); - reqs.add( - client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02", "f_float", "1.7", "f_ip", "127.0.0.1") - ); - reqs.add( - client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2") - ); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02", "f_float", "1.7", "f_ip", "127.0.0.1")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 OR 1.8")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + 
assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 OR 1.8")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testDocWithAllTypes() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); String docBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-example-document.json"); - reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); + reqs.add(prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("Bar")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("Baz")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("19")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Bar")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Baz")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("19")), response -> assertHits(response.getHits(), "1")); // nested doesn't match because it's hidden - resp = prepareSearch("test").setQuery(queryStringQuery("1476383971")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1476383971")), response -> assertHits(response.getHits(), "1")); // bool doesn't match - resp = prepareSearch("test").setQuery(queryStringQuery("7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("23")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1293")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("42")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1.7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("1.5")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("23")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1293")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("42")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1.7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("1.5")), response -> assertHits(response.getHits(), "1")); + 
assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")), response -> assertHits(response.getHits(), "1")); // binary doesn't match // suggest doesn't match // geo_point doesn't match @@ -151,22 +138,23 @@ public void testDocWithAllTypes() throws Exception { public void testKeywordWithWhitespace() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f2", "Foo Bar")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); - reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); + reqs.add(prepareIndex("test").setId("1").setSource("f2", "Foo Bar")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar")); + reqs.add(prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(queryStringQuery("foo")).get(); - assertHits(resp.getHits(), "3"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(queryStringQuery("bar")).get(); - assertHits(resp.getHits(), "2", "3"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(queryStringQuery("Foo Bar")).get(); - assertHits(resp.getHits(), "1", "2", "3"); - assertHitCount(resp, 3L); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> { + assertHits(response.getHits(), "3"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar")), response -> { + assertHits(response.getHits(), "2", "3"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(queryStringQuery("Foo Bar")), response -> { + assertHits(response.getHits(), "1", "2", "3"); + assertHitCount(response, 3L); + }); } public void testAllFields() throws Exception { @@ -177,20 +165,21 @@ public void testAllFields() throws Exception { ensureGreen("test_1"); List<IndexRequestBuilder> reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant")); + reqs.add(prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant")); indexRandom(true, false, reqs); assertHitCount(prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.AND)), 0L); - SearchResponse resp = prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.OR)).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); + assertResponse(prepareSearch("test_1").setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.OR)), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); } public void testPhraseQueryOnFieldWithNoPositions() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar", "f4", "eggplant parmesan")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "foo bar", "f4", "chicken parmesan")); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo bar", "f4", "eggplant parmesan")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "foo bar", "f4", "chicken parmesan")); indexRandom(true, false, reqs); assertHitCount(prepareSearch("test").setQuery(queryStringQuery("\"eggplant parmesan\"").lenient(true)), 0L); @@ -222,58 +211,54 @@ public void testAllFieldsWithSpecifiedLeniency() throws IOException { public void testFieldAlias() throws Exception { List<IndexRequestBuilder> indexRequests = new ArrayList<>(); - 
indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); - indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); - indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); + indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("value").field("f3_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("value").field("f3_alias")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasWithEmbeddedFieldNames() throws Exception { List<IndexRequestBuilder> indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); - indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); - indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); + indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("f3_alias:value AND f2:three")).get(); - - assertNoFailures(response); - assertHitCount(response, 1); - assertHits(response.getHits(), "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("f3_alias:value AND f2:three")), response -> { + assertHitCount(response, 1); + assertHits(response.getHits(), "3"); + }); } public void testFieldAliasWithWildcardField() throws Exception { List<IndexRequestBuilder> indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); - indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); - indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); + indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("value").field("f3_*")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("value").field("f3_*")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasOnDisallowedFieldType() throws Exception { List<IndexRequestBuilder> indexRequests = new ArrayList<>(); - 
indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); // The wildcard field matches aliases for both a text and geo_point field. // By default, the geo_point field should be ignored when building the query. - SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 1); - assertHits(response.getHits(), "1"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")), response -> { + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + }); } private void assertHits(SearchHits hits, String... ids) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java index c9c7c2a56eea9..0a35c33673343 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.query; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilder; @@ -32,6 +31,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; @@ -66,31 +66,35 @@ public void testScriptScore() { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { - client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); + prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", params); - SearchResponse resp = prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "8", "6", "4", "2"); - assertFirstHit(resp, hasScore(1.0f)); - assertSecondHit(resp, hasScore(0.8f)); - assertThirdHit(resp, hasScore(0.6f)); + assertNoFailuresAndResponse( + prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)), + response -> { + assertOrderedSearchHits(response, "10", "8", "6", "4", "2"); + assertFirstHit(response, hasScore(1.0f)); + assertSecondHit(response, hasScore(0.8f)); + assertThirdHit(response, hasScore(0.6f)); + } + ); // applying min score - 
resp = prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script).setMinScore(0.6f)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "8", "6"); + assertNoFailuresAndResponse( + prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script).setMinScore(0.6f)), + response -> assertOrderedSearchHits(response, "10", "8", "6") + ); } public void testScriptScoreBoolQuery() { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { - client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + i, "field2", i).get(); + prepareIndex("test-index").setId("" + i).setSource("field1", "text" + i, "field2", i).get(); } refresh(); @@ -98,11 +102,11 @@ public void testScriptScoreBoolQuery() { params.put("param1", 0.1); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", params); QueryBuilder boolQuery = boolQuery().should(matchQuery("field1", "text1")).should(matchQuery("field1", "text10")); - SearchResponse resp = prepareSearch("test-index").setQuery(scriptScoreQuery(boolQuery, script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "10", "1"); - assertFirstHit(resp, hasScore(1.0f)); - assertSecondHit(resp, hasScore(0.1f)); + assertNoFailuresAndResponse(prepareSearch("test-index").setQuery(scriptScoreQuery(boolQuery, script)), response -> { + assertOrderedSearchHits(response, "10", "1"); + assertFirstHit(response, hasScore(1.0f)); + assertSecondHit(response, hasScore(0.1f)); + }); } // test that when the internal query is rewritten script_score works well @@ -111,16 +115,17 @@ public void testRewrittenQuery() { prepareCreate("test-index2").setSettings(Settings.builder().put("index.number_of_shards", 1)) .setMapping("field1", "type=date", "field2", "type=double") ); - client().prepareIndex("test-index2").setId("1").setSource("field1", "2019-09-01", "field2", 1).get(); - client().prepareIndex("test-index2").setId("2").setSource("field1", "2019-10-01", "field2", 2).get(); - client().prepareIndex("test-index2").setId("3").setSource("field1", "2019-11-01", "field2", 3).get(); + prepareIndex("test-index2").setId("1").setSource("field1", "2019-09-01", "field2", 1).get(); + prepareIndex("test-index2").setId("2").setSource("field1", "2019-10-01", "field2", 2).get(); + prepareIndex("test-index2").setId("3").setSource("field1", "2019-11-01", "field2", 3).get(); refresh(); RangeQueryBuilder rangeQB = new RangeQueryBuilder("field1").from("2019-01-01"); // the query should be rewritten to from:null Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['field2'].value * param1", Map.of("param1", 0.1)); - SearchResponse resp = prepareSearch("test-index2").setQuery(scriptScoreQuery(rangeQB, script)).get(); - assertNoFailures(resp); - assertOrderedSearchHits(resp, "3", "2", "1"); + assertNoFailuresAndResponse( + prepareSearch("test-index2").setQuery(scriptScoreQuery(rangeQB, script)), + response -> assertOrderedSearchHits(response, "3", "2", "1") + ); } public void testDisallowExpensiveQueries() { @@ -128,7 +133,7 @@ public void testDisallowExpensiveQueries() { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { - client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); + 
prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index 918746021f381..ea2decff18cd0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -17,7 +17,6 @@ import org.apache.lucene.util.AttributeSource; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.document.DocumentField; @@ -106,6 +105,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; @@ -141,9 +142,9 @@ public void testEmptyQueryString() throws ExecutionException, InterruptedExcepti createIndex("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "the quick brown fox jumps"), - client().prepareIndex("test").setId("2").setSource("field1", "quick brown"), - client().prepareIndex("test").setId("3").setSource("field1", "quick") + prepareIndex("test").setId("1").setSource("field1", "the quick brown fox jumps"), + prepareIndex("test").setId("2").setSource("field1", "quick brown"), + prepareIndex("test").setId("3").setSource("field1", "quick") ); assertHitCount(prepareSearch().setQuery(queryStringQuery("quick")), 3L); @@ -153,9 +154,9 @@ public void testEmptyQueryString() throws ExecutionException, InterruptedExcepti // see https://github.com/elastic/elasticsearch/issues/3177 public void testIssue3177() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); - client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field1", "value2").get(); + prepareIndex("test").setId("3").setSource("field1", "value3").get(); ensureGreen(); waitForRelocation(); forceMerge(); @@ -185,8 +186,8 @@ public void testIndexOptions() throws Exception { assertAcked(prepareCreate("test").setMapping("field1", "type=text,index_options=docs")); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), - client().prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") + 
prepareIndex("test").setId("1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), + prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") ); assertHitCount(prepareSearch().setQuery(matchPhraseQuery("field2", "quick brown").slop(0)), 1L); @@ -204,44 +205,55 @@ public void testConstantScoreQuery() throws Exception { createIndex("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), - client().prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") + prepareIndex("test").setId("1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), + prepareIndex("test").setId("2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox") ); - SearchResponse searchResponse = prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get(); - assertHitCount(searchResponse, 2L); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertThat(searchHit, hasScore(1.0f)); - } - - searchResponse = prepareSearch("test").setQuery( - boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - - prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - - searchResponse = prepareSearch("test").setQuery( - constantScoreQuery( - boolQuery().must(matchAllQuery()) - .must( - constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + (random.nextBoolean() ? 0.0f : random.nextFloat())) - ) - ) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertThat(searchHit, hasScore(1.0f)); - } - + assertResponse(prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))), response -> { + assertHitCount(response, 2L); + for (SearchHit searchHit : response.getHits().getHits()) { + assertThat(searchHit, hasScore(1.0f)); + } + }); + assertResponse( + prepareSearch("test").setQuery( + boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + } + ); + assertResponse( + prepareSearch("test").setQuery( + constantScoreQuery( + boolQuery().must(matchAllQuery()) + .must( + constantScoreQuery(matchQuery("field1", "quick")).boost( + 1.0f + (random.nextBoolean() ? 
0.0f : random.nextFloat()) + ) + ) + ) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + for (SearchHit searchHit : response.getHits().getHits()) { + assertThat(searchHit, hasScore(1.0f)); + } + } + ); int num = scaledRandomIntBetween(100, 200); IndexRequestBuilder[] builders = new IndexRequestBuilder[num]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test_1").setId("" + i).setSource("f", English.intToEnglish(i)); + builders[i] = prepareIndex("test_1").setId("" + i).setSource("f", English.intToEnglish(i)); } createIndex("test_1"); indexRandom(true, builders); @@ -249,53 +261,57 @@ public void testConstantScoreQuery() throws Exception { int queryRounds = scaledRandomIntBetween(10, 20); for (int i = 0; i < queryRounds; i++) { MatchQueryBuilder matchQuery = matchQuery("f", English.intToEnglish(between(0, num))); - searchResponse = prepareSearch("test_1").setQuery(constantScoreQuery(matchQuery)).setSize(num).get(); - long totalHits = searchResponse.getHits().getTotalHits().value; - SearchHits hits = searchResponse.getHits(); - for (SearchHit searchHit : hits) { - assertThat(searchHit, hasScore(1.0f)); - } - searchResponse = prepareSearch("test_1").setQuery( - boolQuery().must(matchAllQuery()) - .must(constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean() ? 0.0f : random.nextFloat()))) - ).setSize(num).get(); - hits = searchResponse.getHits(); - assertThat(hits.getTotalHits().value, equalTo(totalHits)); - if (totalHits > 1) { - float expected = hits.getAt(0).getScore(); + final long[] constantScoreTotalHits = new long[1]; + assertResponse(prepareSearch("test_1").setQuery(constantScoreQuery(matchQuery)).setSize(num), response -> { + constantScoreTotalHits[0] = response.getHits().getTotalHits().value; + SearchHits hits = response.getHits(); for (SearchHit searchHit : hits) { - assertThat(searchHit, hasScore(expected)); + assertThat(searchHit, hasScore(1.0f)); } - } + }); + assertResponse( + prepareSearch("test_1").setQuery( + boolQuery().must(matchAllQuery()) + .must(constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean() ? 
0.0f : random.nextFloat()))) + ).setSize(num), + response -> { + SearchHits hits = response.getHits(); + assertThat(hits.getTotalHits().value, equalTo(constantScoreTotalHits[0])); + if (constantScoreTotalHits[0] > 1) { + float expected = hits.getAt(0).getScore(); + for (SearchHit searchHit : hits) { + assertThat(searchHit, hasScore(expected)); + } + } + } + ); } } // see #3521 public void testAllDocsQueryString() throws InterruptedException, ExecutionException { createIndex("test"); - indexRandom( - true, - client().prepareIndex("test").setId("1").setSource("foo", "bar"), - client().prepareIndex("test").setId("2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test").setId("1").setSource("foo", "bar"), prepareIndex("test").setId("2").setSource("foo", "bar")); int iters = scaledRandomIntBetween(100, 200); for (int i = 0; i < iters; i++) { assertHitCount(prepareSearch("test").setQuery(queryStringQuery("*:*^10.0").boost(10.0f)), 2L); - SearchResponse searchResponse = prepareSearch("test").setQuery( - boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery())) - ).get(); - assertHitCount(searchResponse, 2L); - assertThat((double) searchResponse.getHits().getAt(0).getScore(), closeTo(2.0, 0.1)); - assertThat((double) searchResponse.getHits().getAt(1).getScore(), closeTo(2.0, 0.1)); + assertResponse( + prepareSearch("test").setQuery(boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery()))), + response -> { + assertHitCount(response, 2L); + assertThat((double) response.getHits().getAt(0).getScore(), closeTo(2.0, 0.1)); + assertThat((double) response.getHits().getAt(1).getScore(), closeTo(2.0, 0.1)); + } + ); } } public void testQueryStringAnalyzedWildcard() throws Exception { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); + prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); assertHitCount(prepareSearch().setQuery(queryStringQuery("value*")), 1L); @@ -308,7 +324,7 @@ public void testQueryStringAnalyzedWildcard() throws Exception { public void testLowercaseExpandedTerms() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); + prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); assertHitCount(prepareSearch().setQuery(queryStringQuery("VALUE_3~1")), 1L); @@ -326,7 +342,7 @@ public void testDateRangeInQueryString() { ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); String aMonthAgo = DateTimeFormatter.ISO_LOCAL_DATE.format(now.minusMonths(1)); String aMonthFromNow = DateTimeFormatter.ISO_LOCAL_DATE.format(now.plusMonths(1)); - client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); + prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")), 1L); @@ -349,7 +365,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { ZoneId timeZone = randomZone(); String now = DateFormatter.forPattern("strict_date_optional_time").format(Instant.now().atZone(timeZone)); logger.info(" --> Using time_zone [{}], now is [{}]", timeZone.getId(), now); - client().prepareIndex("test").setId("1").setSource("past", now).get(); + prepareIndex("test").setId("1").setSource("past", now).get(); refresh(); 
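+        // Descriptive note on the pattern used below (an assumption based on this refactor's
+        // intent, not text from the original change): the assertHitCount(SearchRequestBuilder, long)
+        // overload executes the request and releases the SearchResponse internally, so the call
+        // site no longer needs to capture or close a response of its own.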
assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]").timeZone(timeZone.getId())), 1L); @@ -361,8 +377,8 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); - client().prepareIndex("test").setId("1").setSource("past", "2015-04-05T23:00:00+0000").get(); - client().prepareIndex("test").setId("2").setSource("past", "2015-04-06T00:00:00+0000").get(); + prepareIndex("test").setId("1").setSource("past", "2015-04-05T23:00:00+0000").get(); + prepareIndex("test").setId("2").setSource("past", "2015-04-06T00:00:00+0000").get(); refresh(); // Timezone set with dates @@ -389,9 +405,9 @@ public void testIdsQueryTestsIdIndexed() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "value1"), - client().prepareIndex("test").setId("2").setSource("field1", "value2"), - client().prepareIndex("test").setId("3").setSource("field1", "value3") + prepareIndex("test").setId("1").setSource("field1", "value1"), + prepareIndex("test").setId("2").setSource("field1", "value2"), + prepareIndex("test").setId("3").setSource("field1", "value3") ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(constantScoreQuery(idsQuery().addIds("1", "3"))), "1", "3"); @@ -408,7 +424,7 @@ public void testTermIndexQuery() throws Exception { for (String indexName : indexNames) { assertAcked(indicesAdmin().prepareCreate(indexName)); - indexRandom(true, client().prepareIndex(indexName).setId(indexName + "1").setSource("field1", "value1")); + indexRandom(true, prepareIndex(indexName).setId(indexName + "1").setSource("field1", "value1")); } @@ -431,8 +447,7 @@ public void testFilterExistsMissing() throws Exception { indexRandom( true, - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startObject("obj1") @@ -443,8 +458,7 @@ public void testFilterExistsMissing() throws Exception { .field("field2", "value2_1") .endObject() ), - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .startObject("obj1") @@ -454,8 +468,7 @@ public void testFilterExistsMissing() throws Exception { .field("field1", "value1_2") .endObject() ), - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .startObject("obj2") @@ -465,8 +478,7 @@ public void testFilterExistsMissing() throws Exception { .field("field2", "value2_3") .endObject() ), - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource( jsonBuilder().startObject() .startObject("obj2") @@ -492,7 +504,7 @@ public void testFilterExistsMissing() throws Exception { public void testPassQueryOrFilterAsJSONString() throws Exception { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); assertHitCount(prepareSearch().setQuery(wrapper), 1L); @@ -507,7 +519,7 @@ public void testPassQueryOrFilterAsJSONString() throws Exception { public void testFiltersWithCustomCacheKey() throws Exception { createIndex("test"); - 
client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); refresh(); assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); @@ -520,19 +532,20 @@ public void testMatchQueryNumeric() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("long", 1L, "double", 1.0d), - client().prepareIndex("test").setId("2").setSource("long", 2L, "double", 2.0d), - client().prepareIndex("test").setId("3").setSource("long", 3L, "double", 3.0d) - ); - - SearchResponse searchResponse = prepareSearch().setQuery(matchQuery("long", "1")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - - searchResponse = prepareSearch().setQuery(matchQuery("double", "2")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); + prepareIndex("test").setId("1").setSource("long", 1L, "double", 1.0d), + prepareIndex("test").setId("2").setSource("long", 2L, "double", 2.0d), + prepareIndex("test").setId("3").setSource("long", 3L, "double", 3.0d) + ); + + assertResponse(prepareSearch().setQuery(matchQuery("long", "1")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch().setQuery(matchQuery("double", "2")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); + }); } public void testMatchQueryFuzzy() throws Exception { @@ -540,8 +553,8 @@ public void testMatchQueryFuzzy() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("text", "Unit"), - client().prepareIndex("test").setId("2").setSource("text", "Unity") + prepareIndex("test").setId("1").setSource("text", "Unit"), + prepareIndex("test").setId("2").setSource("text", "Unity") ); assertHitCount(prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.fromEdits(0))), 0L); @@ -564,9 +577,9 @@ public void testMultiMatchQuery() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value4", "field3", "value3"), - client().prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value5", "field3", "value2"), - client().prepareIndex("test").setId("3").setSource("field1", "value3", "field2", "value6", "field3", "value1") + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value4", "field3", "value3"), + prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value5", "field3", "value2"), + prepareIndex("test").setId("3").setSource("field1", "value3", "field2", "value6", "field3", "value1") ); MultiMatchQueryBuilder builder = multiMatchQuery("value1 value2 value4", "field1", "field2"); @@ -594,12 +607,12 @@ public void testMultiMatchQuery() throws Exception { builder = multiMatchQuery("value1").field("field1").field("field3", 1.5f).operator(Operator.AND); // Operator only applies on terms // inside a field! Fields are // always OR-ed together. 
- SearchResponse searchResponse = prepareSearch().setQuery(builder).get(); - assertHitCount(searchResponse, 2L); - assertSearchHits(searchResponse, "3", "1"); - + assertResponse(prepareSearch().setQuery(builder), response -> { + assertHitCount(response, 2L); + assertSearchHits(response, "3", "1"); + }); // Test lenient - client().prepareIndex("test").setId("3").setSource("field1", "value7", "field2", "value8", "field4", 5).get(); + prepareIndex("test").setId("3").setSource("field1", "value7", "field2", "value8", "field4", 5).get(); refresh(); builder = multiMatchQuery("value1", "field1", "field2", "field4"); @@ -607,19 +620,23 @@ public void testMultiMatchQuery() throws Exception { // when the number for shards is randomized and we expect failures // we can either run into partial or total failures depending on the current number of shards Matcher reasonMatcher = containsString("NumberFormatException: For input string: \"value1\""); - ShardSearchFailure[] shardFailures; try { - prepareSearch().setQuery(builder).get(); - shardFailures = searchResponse.getShardFailures(); - assertThat("Expected shard failures, got none", shardFailures, not(emptyArray())); + assertResponse(prepareSearch().setQuery(builder), response -> { + ShardSearchFailure[] shardFailures = response.getShardFailures(); + assertThat("Expected shard failures, got none", shardFailures, not(emptyArray())); + for (ShardSearchFailure shardSearchFailure : shardFailures) { + assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(shardSearchFailure.reason(), reasonMatcher); + } + }); + } catch (SearchPhaseExecutionException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - shardFailures = e.shardFailures(); - } - - for (ShardSearchFailure shardSearchFailure : shardFailures) { - assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(shardSearchFailure.reason(), reasonMatcher); + ShardSearchFailure[] shardFailures = e.shardFailures(); + for (ShardSearchFailure shardSearchFailure : shardFailures) { + assertThat(shardSearchFailure.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(shardSearchFailure.reason(), reasonMatcher); + } } builder.lenient(true); @@ -628,8 +645,8 @@ public void testMultiMatchQuery() throws Exception { public void testMatchQueryZeroTermsQuery() { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field1", "value2").get(); refresh(); BoolQueryBuilder boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(ZeroTermsQueryOption.NONE)) @@ -646,8 +663,8 @@ public void testMatchQueryZeroTermsQuery() { public void testMultiMatchQueryZeroTermsQuery() { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); - client().prepareIndex("test").setId("2").setSource("field1", "value3", "field2", "value4").get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); + prepareIndex("test").setId("2").setSource("field1", "value3", "field2", "value4").get(); refresh(); BoolQueryBuilder 
boolQuery = boolQuery().must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(ZeroTermsQueryOption.NONE)) @@ -665,42 +682,43 @@ public void testMultiMatchQueryZeroTermsQuery() { public void testMultiMatchQueryMinShouldMatch() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); + prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2"); multiMatchQuery.minimumShouldMatch("70%"); - SearchResponse searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); multiMatchQuery.minimumShouldMatch("30%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + }); multiMatchQuery.minimumShouldMatch("70%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); multiMatchQuery.minimumShouldMatch("30%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + }); multiMatchQuery = multiMatchQuery("value1 value2 bar", "field1"); multiMatchQuery.minimumShouldMatch("100%"); assertHitCount(prepareSearch().setQuery(multiMatchQuery), 0L); multiMatchQuery.minimumShouldMatch("70%"); - searchResponse = prepareSearch().setQuery(multiMatchQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); + assertResponse(prepareSearch().setQuery(multiMatchQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); // Min should match > # optional clauses returns no docs. 
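+        // With only three optional terms ("value1 value2 value3") spread over the two fields,
+        // minimumShouldMatch("4") can never be satisfied, so the assertion below expects 0 hits.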
multiMatchQuery = multiMatchQuery("value1 value2 value3", "field1", "field2"); multiMatchQuery.minimumShouldMatch("4"); @@ -709,16 +727,16 @@ public void testMultiMatchQueryMinShouldMatch() { public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); - client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); + prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); + prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); - SearchResponse searchResponse = prepareSearch().setQuery(boolQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(boolQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(1)) // Only one should clause is defined, returns no docs. @@ -728,10 +746,10 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws boolQuery = boolQuery().should(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)) .minimumShouldMatch(1); - searchResponse = prepareSearch().setQuery(boolQuery).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch().setQuery(boolQuery), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); boolQuery = boolQuery().must(termQuery("field1", "value1")) .must(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); assertHitCount(prepareSearch().setQuery(boolQuery), 0L); @@ -739,14 +757,14 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws public void testFuzzyQueryString() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get(); - client().prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); + prepareIndex("test").setId("1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get(); + prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(queryStringQuery("str:kimcy~1")).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); + assertNoFailuresAndResponse(prepareSearch().setQuery(queryStringQuery("str:kimcy~1")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); } @TestIssueLogging( @@ -760,38 +778,37 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { indexRandom( true, false, - client().prepareIndex("test").setId("1").setSource("important", "phrase match", "less_important", "nothing important"), - 
client().prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") + prepareIndex("test").setId("1").setSource("important", "phrase match", "less_important", "nothing important"), + prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") ); - SearchResponse searchResponse = prepareSearch().setQuery( - queryStringQuery("\"phrase match\"").field("important", boost).field("less_important") - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThat( - (double) searchResponse.getHits().getAt(0).getScore(), - closeTo(boost * searchResponse.getHits().getAt(1).getScore(), .1) + assertResponse( + prepareSearch().setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThat((double) response.getHits().getAt(0).getScore(), closeTo(boost * response.getHits().getAt(1).getScore(), .1)); + } ); } public void testSpecialRangeSyntaxInQueryString() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get(); - client().prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); + prepareIndex("test").setId("1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get(); + prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(queryStringQuery("num:>19")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(queryStringQuery("num:>19")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + }); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>20")), 0L); - searchResponse = prepareSearch().setQuery(queryStringQuery("num:>=20")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - + assertResponse(prepareSearch().setQuery(queryStringQuery("num:>=20")), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("2")); + }); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>11")), 2L); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<20")), 1L); assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<=20")), 2L); @@ -803,10 +820,10 @@ public void testEmptytermsQuery() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("term", "1"), - client().prepareIndex("test").setId("2").setSource("term", "2"), - client().prepareIndex("test").setId("3").setSource("term", "3"), - client().prepareIndex("test").setId("4").setSource("term", "4") + prepareIndex("test").setId("1").setSource("term", "1"), + prepareIndex("test").setId("2").setSource("term", "2"), + prepareIndex("test").setId("3").setSource("term", "3"), + prepareIndex("test").setId("4").setSource("term", "4") ); assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("term", new String[0]))), 0L); assertHitCount(prepareSearch("test").setQuery(idsQuery()), 0L); @@ -817,10 +834,10 @@ public void testTermsQuery() throws Exception { indexRandom( true, - 
client().prepareIndex("test").setId("1").setSource("str", "1", "lng", 1L, "dbl", 1.0d), - client().prepareIndex("test").setId("2").setSource("str", "2", "lng", 2L, "dbl", 2.0d), - client().prepareIndex("test").setId("3").setSource("str", "3", "lng", 3L, "dbl", 3.0d), - client().prepareIndex("test").setId("4").setSource("str", "4", "lng", 4L, "dbl", 4.0d) + prepareIndex("test").setId("1").setSource("str", "1", "lng", 1L, "dbl", 1.0d), + prepareIndex("test").setId("2").setSource("str", "2", "lng", 2L, "dbl", 2.0d), + prepareIndex("test").setId("3").setSource("str", "3", "lng", 3L, "dbl", 3.0d), + prepareIndex("test").setId("4").setSource("str", "4", "lng", 4L, "dbl", 4.0d) ); assertSearchHitsWithoutFailures(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("str", "1", "4"))), "1", "4"); assertSearchHitsWithoutFailures( @@ -877,12 +894,11 @@ public void testTermsLookupFilter() throws Exception { indexRandom( true, - client().prepareIndex("lookup").setId("1").setSource("terms", new String[] { "1", "3" }), - client().prepareIndex("lookup").setId("2").setSource("terms", new String[] { "2" }), - client().prepareIndex("lookup").setId("3").setSource("terms", new String[] { "2", "4" }), - client().prepareIndex("lookup").setId("4").setSource("other", "value"), - client().prepareIndex("lookup2") - .setId("1") + prepareIndex("lookup").setId("1").setSource("terms", new String[] { "1", "3" }), + prepareIndex("lookup").setId("2").setSource("terms", new String[] { "2" }), + prepareIndex("lookup").setId("3").setSource("terms", new String[] { "2", "4" }), + prepareIndex("lookup").setId("4").setSource("other", "value"), + prepareIndex("lookup2").setId("1") .setSource( XContentFactory.jsonBuilder() .startObject() @@ -896,8 +912,7 @@ public void testTermsLookupFilter() throws Exception { .endArray() .endObject() ), - client().prepareIndex("lookup2") - .setId("2") + prepareIndex("lookup2").setId("2") .setSource( XContentFactory.jsonBuilder() .startObject() @@ -908,8 +923,7 @@ public void testTermsLookupFilter() throws Exception { .endArray() .endObject() ), - client().prepareIndex("lookup2") - .setId("3") + prepareIndex("lookup2").setId("3") .setSource( XContentFactory.jsonBuilder() .startObject() @@ -923,11 +937,11 @@ public void testTermsLookupFilter() throws Exception { .endArray() .endObject() ), - client().prepareIndex("lookup3").setId("1").setSource("terms", new String[] { "1", "3" }), - client().prepareIndex("test").setId("1").setSource("term", "1"), - client().prepareIndex("test").setId("2").setSource("term", "2"), - client().prepareIndex("test").setId("3").setSource("term", "3"), - client().prepareIndex("test").setId("4").setSource("term", "4") + prepareIndex("lookup3").setId("1").setSource("terms", new String[] { "1", "3" }), + prepareIndex("test").setId("1").setSource("term", "1"), + prepareIndex("test").setId("2").setSource("term", "2"), + prepareIndex("test").setId("3").setSource("term", "3"), + prepareIndex("test").setId("4").setSource("term", "4") ); assertSearchHitsWithoutFailures( prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup", "1", "terms"))), @@ -991,30 +1005,31 @@ public void testTermsLookupFilter() throws Exception { public void testBasicQueryById() throws Exception { assertAcked(prepareCreate("test")); - client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); - client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); - client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); + 
prepareIndex("test").setId("1").setSource("field1", "value1").get(); + prepareIndex("test").setId("2").setSource("field1", "value2").get(); + prepareIndex("test").setId("3").setSource("field1", "value3").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); - assertHitCount(searchResponse, 2L); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1")).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); - assertHitCount(searchResponse, 2L); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1")).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - - searchResponse = prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")).get(); - assertHitCount(searchResponse, 3L); - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2")), response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getHits().length, equalTo(2)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1")), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getHits().length, equalTo(1)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2")), response -> { + assertHitCount(response, 2L); + assertThat(response.getHits().getHits().length, equalTo(2)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1")), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getHits().length, equalTo(1)); + }); + assertResponse(prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")), response -> { + assertHitCount(response, 3L); + assertThat(response.getHits().getHits().length, equalTo(3)); + }); } public void testNumericTermsAndRanges() throws Exception { @@ -1035,102 +1050,119 @@ public void testNumericTermsAndRanges() throws Exception { ) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource("num_byte", 1, "num_short", 1, "num_integer", 1, "num_long", 1, "num_float", 1, "num_double", 1) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource("num_byte", 2, "num_short", 2, "num_integer", 2, "num_long", 2, "num_float", 2, "num_double", 2) .get(); - client().prepareIndex("test") - .setId("17") + prepareIndex("test").setId("17") .setSource("num_byte", 17, "num_short", 17, "num_integer", 17, "num_long", 17, "num_float", 17, "num_double", 17) .get(); refresh(); - SearchResponse searchResponse; logger.info("--> term query on 1"); - searchResponse = prepareSearch("test").setQuery(termQuery("num_byte", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_short", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_integer", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_long", 1)).get(); - 
assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_float", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termQuery("num_double", 1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch("test").setQuery(termQuery("num_byte", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_short", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_integer", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_long", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_float", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termQuery("num_double", 1)), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); logger.info("--> terms query on 1"); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + 
assertResponse(prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); logger.info("--> term filter on 1"); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); logger.info("--> terms filter on 1"); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); - searchResponse = 
prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("1")); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); + assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))), response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("1")); + }); } public void testNumericRangeFilter_2826() throws Exception { @@ -1151,10 +1183,10 @@ public void testNumericRangeFilter_2826() throws Exception { ) ); - client().prepareIndex("test").setId("1").setSource("field1", "test1", "num_long", 1).get(); - client().prepareIndex("test").setId("2").setSource("field1", "test1", "num_long", 2).get(); - client().prepareIndex("test").setId("3").setSource("field1", "test2", "num_long", 3).get(); - client().prepareIndex("test").setId("4").setSource("field1", "test2", "num_long", 4).get(); + prepareIndex("test").setId("1").setSource("field1", "test1", "num_long", 1).get(); + prepareIndex("test").setId("2").setSource("field1", "test1", "num_long", 2).get(); + prepareIndex("test").setId("3").setSource("field1", "test2", "num_long", 3).get(); + prepareIndex("test").setId("4").setSource("field1", "test2", "num_long", 4).get(); refresh(); assertHitCount( @@ -1193,10 +1225,10 @@ public void testMustNot() throws InterruptedException { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar"), - client().prepareIndex("test").setId("2").setSource("description", "foo other anything"), - client().prepareIndex("test").setId("3").setSource("description", "foo other"), - client().prepareIndex("test").setId("4").setSource("description", "foo") + prepareIndex("test").setId("1").setSource("description", "foo other anything bar"), + prepareIndex("test").setId("2").setSource("description", "foo other anything"), + prepareIndex("test").setId("3").setSource("description", "foo other"), + prepareIndex("test").setId("4").setSource("description", "foo") ); assertHitCount(prepareSearch("test").setQuery(matchAllQuery()).setSearchType(SearchType.DFS_QUERY_THEN_FETCH), 4L); @@ -1210,10 +1242,7 @@ public void testMustNot() throws InterruptedException { public void testIntervals() throws InterruptedException { createIndex("test"); - indexRandom( - true, - client().prepareIndex("test").setId("1").setSource("description", "it's cold outside, there's no kind of atmosphere") - ); + indexRandom(true, prepareIndex("test").setId("1").setSource("description", 
"it's cold outside, there's no kind of atmosphere")); String json = """ { @@ -1238,8 +1267,7 @@ public void testIntervals() throws InterruptedException { } } }"""; - SearchResponse response = prepareSearch("test").setQuery(wrapperQuery(json)).get(); - assertHitCount(response, 1L); + assertHitCount(prepareSearch("test").setQuery(wrapperQuery(json)), 1L); } // see #2994 @@ -1248,10 +1276,10 @@ public void testSimpleSpan() throws IOException, ExecutionException, Interrupted indexRandom( true, - client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar"), - client().prepareIndex("test").setId("2").setSource("description", "foo other anything"), - client().prepareIndex("test").setId("3").setSource("description", "foo other"), - client().prepareIndex("test").setId("4").setSource("description", "foo") + prepareIndex("test").setId("1").setSource("description", "foo other anything bar"), + prepareIndex("test").setId("2").setSource("description", "foo other anything"), + prepareIndex("test").setId("3").setSource("description", "foo other"), + prepareIndex("test").setId("4").setSource("description", "foo") ); assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanTermQuery("description", "bar"))), 1L); @@ -1266,10 +1294,10 @@ public void testSimpleSpan() throws IOException, ExecutionException, Interrupted public void testSpanMultiTermQuery() throws IOException { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar", "count", 1).get(); - client().prepareIndex("test").setId("2").setSource("description", "foo other anything", "count", 2).get(); - client().prepareIndex("test").setId("3").setSource("description", "foo other", "count", 3).get(); - client().prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get(); + prepareIndex("test").setId("1").setSource("description", "foo other anything bar", "count", 1).get(); + prepareIndex("test").setId("2").setSource("description", "foo other anything", "count", 2).get(); + prepareIndex("test").setId("3").setSource("description", "foo other", "count", 3).get(); + prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get(); refresh(); assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))), 4); @@ -1287,8 +1315,8 @@ public void testSpanMultiTermQuery() throws IOException { public void testSpanNot() throws IOException, ExecutionException, InterruptedException { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("description", "the quick brown fox jumped over the lazy dog").get(); - client().prepareIndex("test").setId("2").setSource("description", "the quick black fox leaped over the sleeping dog").get(); + prepareIndex("test").setId("1").setSource("description", "the quick brown fox jumped over the lazy dog").get(); + prepareIndex("test").setId("2").setSource("description", "the quick black fox leaped over the sleeping dog").get(); refresh(); assertHitCount( @@ -1352,23 +1380,19 @@ public void testSimpleDFSQuery() throws IOException { ) ); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setRouting("Y") .setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100, "type", "s") .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setRouting("X") .setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000, "type", "s") .get(); - 
client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setRouting(randomAlphaOfLength(2)) .setSource("online", false, "ts", System.currentTimeMillis() - 100, "type", "bs") .get(); - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setRouting(randomAlphaOfLength(2)) .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs") .get(); @@ -1397,7 +1421,7 @@ public void testSimpleDFSQuery() throws IOException { } public void testMultiFieldQueryString() { - client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); logger.info("regular"); assertHitCount(prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")), 1); @@ -1420,7 +1444,7 @@ public void testMultiFieldQueryString() { public void testMultiMatchLenientIssue3797() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get(); refresh(); assertHitCount(prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), 1L); @@ -1431,25 +1455,29 @@ public void testMultiMatchLenientIssue3797() { public void testMinScore() throws ExecutionException, InterruptedException { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("score", 1.5).get(); - client().prepareIndex("test").setId("2").setSource("score", 1.0).get(); - client().prepareIndex("test").setId("3").setSource("score", 2.0).get(); - client().prepareIndex("test").setId("4").setSource("score", 0.5).get(); + prepareIndex("test").setId("1").setSource("score", 1.5).get(); + prepareIndex("test").setId("2").setSource("score", 1.0).get(); + prepareIndex("test").setId("3").setSource("score", 2.0).get(); + prepareIndex("test").setId("4").setSource("score", 0.5).get(); refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f) - ).get(); - assertHitCount(searchResponse, 2); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("1")); + assertResponse( + prepareSearch("test").setQuery( + functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f) + ), + response -> { + assertHitCount(response, 2); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("1")); + } + ); } public void testQueryStringWithSlopAndFields() { assertAcked(prepareCreate("test")); - client().prepareIndex("test").setId("1").setSource("desc", "one two three", "type", "customer").get(); - client().prepareIndex("test").setId("2").setSource("desc", "one two three", "type", "product").get(); + prepareIndex("test").setId("1").setSource("desc", "one two three", "type", "customer").get(); + prepareIndex("test").setId("2").setSource("desc", "one two three", "type", "product").get(); refresh(); assertHitCount(prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")), 2); @@ -1480,12 +1508,12 @@ public void testDateProvidedAsNumber() throws InterruptedException { assertAcked(indicesAdmin().preparePutMapping("test").setSource("field", "type=date,format=epoch_millis").get()); indexRandom( 
true, - client().prepareIndex("test").setId("1").setSource("field", 1000000000001L), - client().prepareIndex("test").setId("2").setSource("field", 1000000000000L), - client().prepareIndex("test").setId("3").setSource("field", 999999999999L), - client().prepareIndex("test").setId("4").setSource("field", 1000000000002L), - client().prepareIndex("test").setId("5").setSource("field", 1000000000003L), - client().prepareIndex("test").setId("6").setSource("field", 999999999999L) + prepareIndex("test").setId("1").setSource("field", 1000000000001L), + prepareIndex("test").setId("2").setSource("field", 1000000000000L), + prepareIndex("test").setId("3").setSource("field", 999999999999L), + prepareIndex("test").setId("4").setSource("field", 1000000000002L), + prepareIndex("test").setId("5").setSource("field", 1000000000003L), + prepareIndex("test").setId("6").setSource("field", 999999999999L) ); assertHitCount(prepareSearch("test").setSize(0).setQuery(rangeQuery("field").gte(1000000000000L)), 4); @@ -1497,74 +1525,104 @@ public void testRangeQueryWithTimeZone() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("date", "2014-01-01", "num", 1), - client().prepareIndex("test").setId("2").setSource("date", "2013-12-31T23:00:00", "num", 2), - client().prepareIndex("test").setId("3").setSource("date", "2014-01-01T01:00:00", "num", 3), + prepareIndex("test").setId("1").setSource("date", "2014-01-01", "num", 1), + prepareIndex("test").setId("2").setSource("date", "2013-12-31T23:00:00", "num", 2), + prepareIndex("test").setId("3").setSource("date", "2014-01-01T01:00:00", "num", 3), // Now in UTC+1 - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4) ); - SearchResponse searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("1")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("2")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("1")); + } + ); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00")), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("2")); + } + ); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00")), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + } + ); // We explicitly define a time zone in the from/to dates so whatever the time zone is, it won't be used - searchResponse = prepareSearch("test").setQuery( - 
QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("1")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("2")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("1")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("2")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + } + ); // We define a time zone to be applied to the filter and from/to have no time zone - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("1")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("2")); - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00") - ).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - - searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), is("4")); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("1")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00") + ), + response -> { + assertHitCount(response, 1L); + 
assertThat(response.getHits().getAt(0).getId(), is("2")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00") + ), + response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + } + ); + assertResponse(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")), response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("4")); + }); } /** @@ -1595,8 +1653,8 @@ public void testRangeQueryWithLocaleMapping() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"), - client().prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800") + prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"), + prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800") ); assertHitCount( @@ -1614,7 +1672,7 @@ public void testRangeQueryWithLocaleMapping() throws Exception { } public void testSearchEmptyDoc() { - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); refresh(); assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); @@ -1624,8 +1682,8 @@ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedE createIndex("test1"); indexRandom( true, - client().prepareIndex("test1").setId("1").setSource("field", "Johnnie Walker Black Label"), - client().prepareIndex("test1").setId("2").setSource("field", "trying out Elasticsearch") + prepareIndex("test1").setId("1").setSource("field", "Johnnie Walker Black Label"), + prepareIndex("test1").setId("2").setSource("field", "trying out Elasticsearch") ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(matchPhrasePrefixQuery("field", "Johnnie la").slop(between(2, 5))), "1"); @@ -1635,39 +1693,41 @@ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedE public void testQueryStringParserCache() throws Exception { createIndex("test"); - indexRandom(true, false, client().prepareIndex("test").setId("1").setSource("nameTokens", "xyz")); - - SearchResponse response = prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)) - .get(); - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - - float first = response.getHits().getAt(0).getScore(); + indexRandom(true, false, prepareIndex("test").setId("1").setSource("nameTokens", "xyz")); + final float[] first = new float[1]; + assertResponse( + prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + first[0] = response.getHits().getAt(0).getScore(); + } + ); for (int i = 0; i < 100; i++) { - response = 
prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - float actual = response.getHits().getAt(0).getScore(); - assertThat(i + " expected: " + first + " actual: " + actual, Float.compare(first, actual), equalTo(0)); + final int finalI = i; + assertResponse( + prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + float actual = response.getHits().getAt(0).getScore(); + assertThat(finalI + " expected: " + first[0] + " actual: " + actual, Float.compare(first[0], actual), equalTo(0)); + } + ); } } public void testRangeQueryRangeFields_24744() throws Exception { assertAcked(prepareCreate("test").setMapping("int_range", "type=integer_range")); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().startObject("int_range").field("gte", 10).field("lte", 20).endObject().endObject()) .get(); refresh(); RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); - SearchResponse searchResponse = prepareSearch("test").setQuery(range).get(); - assertHitCount(searchResponse, 1); + assertHitCount(prepareSearch("test").setQuery(range), 1L); } public void testNestedQueryWithFieldAlias() throws Exception { @@ -1728,21 +1788,20 @@ public void testFieldAliasesForMetaFields() throws Exception { .endObject(); assertAcked(prepareCreate("test").setMapping(mapping)); - IndexRequestBuilder indexRequest = client().prepareIndex("test").setId("1").setRouting("custom").setSource("field", "value"); + IndexRequestBuilder indexRequest = prepareIndex("test").setId("1").setRouting("custom").setSource("field", "value"); indexRandom(true, false, indexRequest); updateClusterSettings(Settings.builder().put(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey(), true)); try { - SearchResponse searchResponse = prepareSearch().setQuery(termQuery("routing-alias", "custom")) - .addDocValueField("id-alias") - .get(); - assertHitCount(searchResponse, 1L); + assertResponse(prepareSearch().setQuery(termQuery("routing-alias", "custom")).addDocValueField("id-alias"), response -> { + assertHitCount(response, 1L); - SearchHit hit = searchResponse.getHits().getAt(0); - assertEquals(2, hit.getFields().size()); - assertTrue(hit.getFields().containsKey("id-alias")); + SearchHit hit = response.getHits().getAt(0); + assertEquals(2, hit.getFields().size()); + assertTrue(hit.getFields().containsKey("id-alias")); - DocumentField field = hit.getFields().get("id-alias"); - assertThat(field.getValue().toString(), equalTo("1")); + DocumentField field = hit.getFields().get("id-alias"); + assertThat(field.getValue().toString(), equalTo("1")); + }); } finally { // unset cluster setting updateClusterSettings(Settings.builder().putNull(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey())); @@ -1762,7 +1821,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { .build() ).setMapping("field1", "type=keyword,normalizer=lowercase_normalizer") ); - client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); + 
prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); { @@ -1787,7 +1846,7 @@ public void testWildcardQueryNormalizationOnTextField() { .build() ).setMapping("field1", "type=text,analyzer=lowercase_analyzer") ); - client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); + prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); { @@ -1817,7 +1876,7 @@ public void testWildcardQueryNormalizationKeywordSpecialCharacters() { .build() ).setMapping("field", "type=keyword,normalizer=no_wildcard") ); - client().prepareIndex("test").setId("1").setSource("field", "label-1").get(); + prepareIndex("test").setId("1").setSource("field", "label-1").get(); refresh(); WildcardQueryBuilder wildCardQuery = wildcardQuery("field", "la*"); @@ -1869,7 +1928,7 @@ public Map> getTokenizers() { */ public void testIssueFuzzyInsideSpanMulti() { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "foobarbaz").get(); + prepareIndex("test").setId("1").setSource("field", "foobarbaz").get(); ensureGreen(); refresh(); @@ -1881,16 +1940,17 @@ public void testFetchIdFieldQuery() { createIndex("test"); int docCount = randomIntBetween(10, 50); for (int i = 0; i < docCount; i++) { - client().prepareIndex("test").setSource("field", "foobarbaz").get(); + prepareIndex("test").setSource("field", "foobarbaz").get(); } ensureGreen(); refresh(); - SearchResponse response = prepareSearch("test").addFetchField("_id").setSize(docCount).get(); - SearchHit[] hits = response.getHits().getHits(); - assertEquals(docCount, hits.length); - for (SearchHit hit : hits) { - assertNotNull(hit.getFields().get("_id").getValue()); - } + assertResponse(prepareSearch("test").addFetchField("_id").setSize(docCount), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(docCount, hits.length); + for (SearchHit hit : hits) { + assertNotNull(hit.getFields().get("_id").getValue()); + } + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 78d98b76b9bc8..449777580b691 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; @@ -49,7 +48,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -77,36 +77,43 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedExcept indexRandom( true, false, - client().prepareIndex("test").setId("1").setSource("body", "foo"), - client().prepareIndex("test").setId("2").setSource("body", "bar"), - client().prepareIndex("test").setId("3").setSource("body", "foo bar"), - client().prepareIndex("test").setId("4").setSource("body", "quux baz eggplant"), - client().prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"), - client().prepareIndex("test").setId("6").setSource("otherbody", "spaghetti") + prepareIndex("test").setId("1").setSource("body", "foo"), + prepareIndex("test").setId("2").setSource("body", "bar"), + prepareIndex("test").setId("3").setSource("body", "foo bar"), + prepareIndex("test").setId("4").setSource("body", "quux baz eggplant"), + prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"), + prepareIndex("test").setId("6").setSource("otherbody", "spaghetti") ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo bar")), "1", "2", "3"); // Tests boost value setting. In this case doc 1 should always be ranked above the other // two matches. - SearchResponse searchResponse = prepareSearch().setQuery( - boolQuery().should(simpleQueryStringQuery("\"foo bar\"").boost(10.0f)).should(termQuery("body", "eggplant")) - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("3")); - + assertResponse( + prepareSearch().setQuery( + boolQuery().should(simpleQueryStringQuery("\"foo bar\"").boost(10.0f)).should(termQuery("body", "eggplant")) + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("3")); + } + ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo bar").defaultOperator(Operator.AND)), "3"); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("\"quux baz\" +(eggplant | spaghetti)")), "4", "5"); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("eggplants").analyzer("mock_snowball")), "4"); - searchResponse = prepareSearch().setQuery( - simpleQueryStringQuery("spaghetti").field("body", 1000.0f).field("otherbody", 2.0f).queryName("myquery") - ).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("5")); - assertSearchHits(searchResponse, "5", "6"); - assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("myquery")); + assertResponse( + prepareSearch().setQuery( + simpleQueryStringQuery("spaghetti").field("body", 1000.0f).field("otherbody", 2.0f).queryName("myquery") + ), + response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasId("5")); + assertSearchHits(response, "5", "6"); + assertThat(response.getHits().getAt(0).getMatchedQueries()[0], equalTo("myquery")); + } + ); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("spaghetti").field("*body")), "5", "6"); } @@ -117,10 +124,10 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { indexRandom( true, false, - client().prepareIndex("test").setId("1").setSource("body", "foo"), - client().prepareIndex("test").setId("2").setSource("body", "bar"), - client().prepareIndex("test").setId("3").setSource("body", "foo bar"), - 
client().prepareIndex("test").setId("4").setSource("body", "foo baz bar") + prepareIndex("test").setId("1").setSource("body", "foo"), + prepareIndex("test").setId("2").setSource("body", "bar"), + prepareIndex("test").setId("3").setSource("body", "foo bar"), + prepareIndex("test").setId("4").setSource("body", "foo baz bar") ); logger.info("--> query 1"); @@ -152,10 +159,10 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { indexRandom( true, false, - client().prepareIndex("test").setId("5").setSource("body2", "foo", "other", "foo"), - client().prepareIndex("test").setId("6").setSource("body2", "bar", "other", "foo"), - client().prepareIndex("test").setId("7").setSource("body2", "foo bar", "other", "foo"), - client().prepareIndex("test").setId("8").setSource("body2", "foo baz bar", "other", "foo") + prepareIndex("test").setId("5").setSource("body2", "foo", "other", "foo"), + prepareIndex("test").setId("6").setSource("body2", "bar", "other", "foo"), + prepareIndex("test").setId("7").setSource("body2", "foo bar", "other", "foo"), + prepareIndex("test").setId("8").setSource("body2", "foo baz bar", "other", "foo") ); logger.info("--> query 5"); @@ -205,7 +212,7 @@ public void testNestedFieldSimpleQueryString() throws IOException { .endObject() ) ); - client().prepareIndex("test").setId("1").setSource("body", "foo bar baz").get(); + prepareIndex("test").setId("1").setSource("body", "foo bar baz").get(); refresh(); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body")), "1"); @@ -218,12 +225,12 @@ public void testSimpleQueryStringFlags() throws ExecutionException, InterruptedE createIndex("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("body", "foo"), - client().prepareIndex("test").setId("2").setSource("body", "bar"), - client().prepareIndex("test").setId("3").setSource("body", "foo bar"), - client().prepareIndex("test").setId("4").setSource("body", "quux baz eggplant"), - client().prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"), - client().prepareIndex("test").setId("6").setSource("otherbody", "spaghetti") + prepareIndex("test").setId("1").setSource("body", "foo"), + prepareIndex("test").setId("2").setSource("body", "bar"), + prepareIndex("test").setId("3").setSource("body", "foo bar"), + prepareIndex("test").setId("4").setSource("body", "quux baz eggplant"), + prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"), + prepareIndex("test").setId("6").setSource("otherbody", "spaghetti") ); assertSearchHitsWithoutFailures( @@ -276,17 +283,19 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte createIndex("test1", "test2"); indexRandom( true, - client().prepareIndex("test1").setId("1").setSource("field", "foo"), - client().prepareIndex("test2").setId("10").setSource("field", 5) + prepareIndex("test1").setId("1").setSource("field", "foo"), + prepareIndex("test2").setId("10").setSource("field", 5) ); refresh(); - SearchResponse searchResponse = prepareSearch().setAllowPartialSearchResults(true) - .setQuery(simpleQueryStringQuery("foo").field("field")) - .get(); - assertFailures(searchResponse); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); + assertResponse( + prepareSearch().setAllowPartialSearchResults(true).setQuery(simpleQueryStringQuery("foo").field("field")), + response -> { + assertFailures(response); + assertHitCount(response, 1L); + assertSearchHits(response, "1"); + } + ); 
assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("foo").field("field").lenient(true)), "1"); } @@ -295,8 +304,8 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte public void testLenientFlagBeingTooLenient() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("num", 1, "body", "foo bar baz"), - client().prepareIndex("test").setId("2").setSource("num", 2, "body", "eggplant spaghetti lasagna") + prepareIndex("test").setId("1").setSource("num", 1, "body", "foo bar baz"), + prepareIndex("test").setId("2").setSource("num", 2, "body", "eggplant spaghetti lasagna") ); BoolQueryBuilder q = boolQuery().should(simpleQueryStringQuery("bar").field("num").field("body").lenient(true)); @@ -320,22 +329,22 @@ public void testSimpleQueryStringAnalyzeWildcard() throws ExecutionException, In CreateIndexRequestBuilder mappingRequest = indicesAdmin().prepareCreate("test1").setMapping(mapping); mappingRequest.get(); - indexRandom(true, client().prepareIndex("test1").setId("1").setSource("location", "Köln")); + indexRandom(true, prepareIndex("test1").setId("1").setSource("location", "Köln")); refresh(); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("Köln*").field("location")), "1"); } public void testSimpleQueryStringUsesFieldAnalyzer() throws Exception { - client().prepareIndex("test").setId("1").setSource("foo", 123, "bar", "abc").get(); - client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); + prepareIndex("test").setId("1").setSource("foo", 123, "bar", "abc").get(); + prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("123").field("foo").field("bar")), "1"); } public void testSimpleQueryStringOnIndexMetaField() throws Exception { - client().prepareIndex("test").setId("1").setSource("foo", 123, "bar", "abc").get(); - client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); + prepareIndex("test").setId("1").setSource("foo", 123, "bar", "abc").get(); + prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("test").field("_index")), "1", "2"); } @@ -356,7 +365,7 @@ public void testEmptySimpleQueryStringWithAnalysis() throws Exception { CreateIndexRequestBuilder mappingRequest = indicesAdmin().prepareCreate("test1").setMapping(mapping); mappingRequest.get(); - indexRandom(true, client().prepareIndex("test1").setId("1").setSource("body", "Some Text")); + indexRandom(true, prepareIndex("test1").setId("1").setSource("body", "Some Text")); refresh(); assertSearchHitsWithoutFailures(prepareSearch().setQuery(simpleQueryStringQuery("the*").field("body"))); @@ -368,22 +377,23 @@ public void testBasicAllQuery() throws Exception { ensureGreen("test"); List<IndexRequestBuilder> reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar baz")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f2", "Bar")); - reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo bar baz")); + reqs.add(prepareIndex("test").setId("2").setSource("f2", "Bar")); + reqs.add(prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); - SearchResponse resp =
prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar")).get(); - assertHitCount(resp, 2L); - assertHits(resp.getHits(), "1", "3"); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get(); - assertHitCount(resp, 3L); - assertHits(resp.getHits(), "1", "2", "3"); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar")), response -> { + assertHitCount(response, 2L); + assertHits(response.getHits(), "1", "3"); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), response -> { + assertHitCount(response, 3L); + assertHits(response.getHits(), "1", "2", "3"); + }); } public void testWithDate() throws Exception { @@ -392,25 +402,26 @@ public void testWithDate() throws Exception { ensureGreen("test"); List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testWithLotsOfTypes() throws Exception { @@ -419,29 +430,26 @@ public void testWithLotsOfTypes() throws Exception { ensureGreen("test"); List reqs = new ArrayList<>(); - reqs.add( - client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02", "f_float", "1.7", "f_ip", "127.0.0.1") - ); - reqs.add( - client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2") - ); + reqs.add(prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02", "f_float", "1.7", 
"f_ip", "127.0.0.1")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")).get(); - assertHits(resp.getHits(), "1", "2"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")), response -> { + assertHits(response.getHits(), "1", "2"); + assertHitCount(response, 2L); + }); } public void testDocWithAllTypes() throws Exception { @@ -451,42 +459,38 @@ public void testDocWithAllTypes() throws Exception { List reqs = new ArrayList<>(); String docBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-example-document.json"); - reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); + reqs.add(prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("19")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("19")), response -> assertHits(response.getHits(), "1")); // nested doesn't match because it's hidden - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1476383971")).get(); - assertHits(resp.getHits(), "1"); + assertResponse( + prepareSearch("test").setQuery(simpleQueryStringQuery("1476383971")), + response -> assertHits(response.getHits(), "1") + ); // bool doesn't match - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("23")).get(); 
- assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1293")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("42")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")).get(); - assertHits(resp.getHits(), "1"); - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")).get(); - assertHits(resp.getHits(), "1"); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("23")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1293")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("42")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")), response -> assertHits(response.getHits(), "1")); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")), response -> assertHits(response.getHits(), "1")); + assertResponse( + prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")), + response -> assertHits(response.getHits(), "1") + ); // binary doesn't match // suggest doesn't match // geo_point doesn't match // geo_shape doesn't match - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)).get(); - assertHits(resp.getHits(), "1"); + assertResponse( + prepareSearch("test").setQuery(simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)), + response -> assertHits(response.getHits(), "1") + ); } public void testKeywordWithWhitespace() throws Exception { @@ -495,18 +499,19 @@ public void testKeywordWithWhitespace() throws Exception { ensureGreen("test"); List reqs = new ArrayList<>(); - reqs.add(client().prepareIndex("test").setId("1").setSource("f2", "Foo Bar")); - reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); - reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); + reqs.add(prepareIndex("test").setId("1").setSource("f2", "Foo Bar")); + reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar")); + reqs.add(prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); - SearchResponse resp = prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); - assertHits(resp.getHits(), "3"); - assertHitCount(resp, 1L); - - resp = prepareSearch("test").setQuery(simpleQueryStringQuery("bar")).get(); - assertHits(resp.getHits(), "2", "3"); - assertHitCount(resp, 2L); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> { + assertHits(response.getHits(), "3"); + assertHitCount(response, 1L); + }); + assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar")), response -> { + assertHits(response.getHits(), "2", "3"); + assertHitCount(response, 2L); + }); } public void testAllFieldsWithSpecifiedLeniency() throws Exception { @@ -515,7 +520,7 @@ public void testAllFieldsWithSpecifiedLeniency() throws Exception { ensureGreen("test"); List reqs = new ArrayList<>(); - 
reqs.add(client().prepareIndex("test").setId("1").setSource("f_long", 1)); + reqs.add(prepareIndex("test").setId("1").setSource("f_long", 1)); indexRandom(true, false, reqs); SearchPhaseExecutionException e = expectThrows( @@ -531,16 +536,15 @@ public void testFieldAlias() throws Exception { ensureGreen("test"); List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); - indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); - indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); + indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasWithWildcardField() throws Exception { @@ -549,16 +553,15 @@ public void testFieldAliasWithWildcardField() throws Exception { ensureGreen("test"); List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); - indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); - indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); + indexRequests.add(prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); - SearchResponse response = prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")).get(); - - assertNoFailures(response); - assertHitCount(response, 2); - assertHits(response.getHits(), "2", "3"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")), response -> { + assertHitCount(response, 2); + assertHits(response.getHits(), "2", "3"); + }); } public void testFieldAliasOnDisallowedFieldType() throws Exception { @@ -567,16 +570,15 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { ensureGreen("test"); List indexRequests = new ArrayList<>(); - indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); + indexRequests.add(prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); // The wildcard field matches aliases for both a text and boolean field. // By default, the boolean field should be ignored when building the query. 
- SearchResponse response = prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")).get(); - - assertNoFailures(response); - assertHitCount(response, 1); - assertHits(response.getHits(), "1"); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(queryStringQuery("text").field("f*_alias")), response -> { + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + }); } private void assertHits(SearchHits hits, String... ids) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java index 1d13bea9e0639..20b9ce38254c3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -32,6 +31,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -55,7 +55,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { assertAcked(prepareCreate("test").setSettings(indexSettings(cluster().numDataNodes() + 2, 0))); ensureGreen(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); + prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); internalCluster().stopRandomDataNode(); @@ -67,21 +67,25 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { "_prefer_nodes:somenode,server2" }; for (String pref : preferences) { logger.info("--> Testing out preference={}", pref); - SearchResponse searchResponse = prepareSearch().setSize(0).setPreference(pref).get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); - searchResponse = prepareSearch().setPreference(pref).get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); + assertResponse(prepareSearch().setSize(0).setPreference(pref), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0)); + }); + assertResponse(prepareSearch().setPreference(pref), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0)); + }); } // _only_local is a stricter preference, we need to send the request to a data node - SearchResponse searchResponse = 
dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local").get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); - searchResponse = dataNodeClient().prepareSearch().setPreference("_only_local").get(); - assertThat(RestStatus.OK, equalTo(searchResponse.status())); - assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); + assertResponse(dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local"), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0)); + }); + assertResponse(dataNodeClient().prepareSearch().setPreference("_only_local"), response -> { + assertThat(RestStatus.OK, equalTo(response.status())); + assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0)); + }); } public void testNoPreferenceRandom() { @@ -93,33 +97,43 @@ public void testNoPreferenceRandom() { ); ensureGreen(); - client().prepareIndex("test").setSource("field1", "value1").get(); + prepareIndex("test").setSource("field1", "value1").get(); refresh(); final Client client = internalCluster().smartClient(); - SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get(); - String firstNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId(); - searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get(); - String secondNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId(); - - assertThat(firstNodeId, not(equalTo(secondNodeId))); + assertResponse( + client.prepareSearch("test").setQuery(matchAllQuery()), + first -> assertResponse( + client.prepareSearch("test").setQuery(matchAllQuery()), + second -> assertThat( + first.getHits().getAt(0).getShard().getNodeId(), + not(equalTo(second.getHits().getAt(0).getShard().getNodeId())) + ) + ) + ); } public void testSimplePreference() { indicesAdmin().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", XContentType.JSON).get(); ensureGreen(); - client().prepareIndex("test").setSource("field1", "value1").get(); + prepareIndex("test").setSource("field1", "value1").get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L)) + ); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setPreference("_local").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setPreference("_local"), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L)) + ); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setPreference("1234").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setPreference("1234"), + response -> assertThat(response.getHits().getTotalHits().value, equalTo(1L)) + ); } public void testThatSpecifyingNonExistingNodesReturnsUsefulError() { @@ -142,7 +156,7 @@ public void testNodesOnlyRandom() { ) ); ensureGreen(); - client().prepareIndex("test").setSource("field1", "value1").get(); + prepareIndex("test").setSource("field1", "value1").get(); refresh(); final Client
client = internalCluster().smartClient(); @@ -188,9 +202,10 @@ private void assertSearchOnRandomNodes(SearchRequestBuilder request) { Set<String> hitNodes = new HashSet<>(); for (int i = 0; i < 2; i++) { - SearchResponse searchResponse = request.get(); - assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); - hitNodes.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); + assertResponse(request, response -> { + assertThat(response.getHits().getHits().length, greaterThan(0)); + hitNodes.add(response.getHits().getAt(0).getShard().getNodeId()); + }); } assertThat(hitNodes.size(), greaterThan(1)); } @@ -212,7 +227,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { ) ); ensureGreen(); - client().prepareIndex("test").setSource("field1", "value1").get(); + prepareIndex("test").setSource("field1", "value1").get(); refresh(); final String customPreference = randomAlphaOfLength(10); @@ -259,8 +274,9 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { } private static void assertSearchesSpecificNode(String index, String customPreference, String nodeId) { - final SearchResponse searchResponse = prepareSearch(index).setQuery(matchAllQuery()).setPreference(customPreference).get(); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).getShard().getNodeId(), equalTo(nodeId)); + assertResponse(prepareSearch(index).setQuery(matchAllQuery()).setPreference(customPreference), response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + assertThat(response.getHits().getAt(0).getShard().getNodeId(), equalTo(nodeId)); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java index 35ea9614d182a..1362b0166a709 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.OperationRouting; @@ -23,6 +22,7 @@ import java.util.Set; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -49,18 +49,18 @@ public void testNodeSelection() { // Before we've gathered stats for all nodes, we should try each node once.
Set<String> nodeIds = new HashSet<>(); - SearchResponse searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - nodeIds.add(searchResponse.getHits().getAt(0).getShard().getNodeId()); - + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); + }); assertEquals(3, nodeIds.size()); // Now after more searches, we should select a node with the lowest ARS rank. @@ -78,13 +78,14 @@ public void testNodeSelection() { assertNotNull(nodeStats); assertEquals(3, nodeStats.getAdaptiveSelectionStats().getComputedStats().size()); - searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - String selectedNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId(); - double selectedRank = nodeStats.getAdaptiveSelectionStats().getRanks().get(selectedNodeId); + assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + String selectedNodeId = response.getHits().getAt(0).getShard().getNodeId(); + double selectedRank = nodeStats.getAdaptiveSelectionStats().getRanks().get(selectedNodeId); - for (Map.Entry<String, Double> entry : nodeStats.getAdaptiveSelectionStats().getRanks().entrySet()) { - double rank = entry.getValue(); - assertThat(rank, greaterThanOrEqualTo(selectedRank)); - } + for (Map.Entry<String, Double> entry : nodeStats.getAdaptiveSelectionStats().getRanks().entrySet()) { + double rank = entry.getValue(); + assertThat(rank, greaterThanOrEqualTo(selectedRank)); + } + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java index dc460468db605..4c99becad055e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.scriptfilter; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -37,6 +36,7 @@ import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -102,31 +102,30 @@ public void testCustomScriptBinaryField() throws Exception { final byte[] randomBytesDoc2 = getRandomBytes(16); assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("binary")).setSettings(indexSettings())); - client().prepareIndex("my-index") - .setId("1") + prepareIndex("my-index").setId("1") .setSource(jsonBuilder().startObject().field("binaryData", Base64.getEncoder().encodeToString(randomBytesDoc1)).endObject()) .get(); flush(); - client().prepareIndex("my-index") - .setId("2") + prepareIndex("my-index").setId("2") .setSource(jsonBuilder().startObject().field("binaryData", Base64.getEncoder().encodeToString(randomBytesDoc2)).endObject()) .get(); flush(); refresh(); - SearchResponse response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length > 15", emptyMap())) - ) - .addScriptField( - "sbinaryData", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length", emptyMap()) + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length > 15", emptyMap())) ) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); - assertThat(response.getHits().getAt(0).getFields().get("sbinaryData").getValues().get(0), equalTo(16)); - + .addScriptField( + "sbinaryData", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length", emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getFields().get("sbinaryData").getValues().get(0), equalTo(16)); + } + ); } private byte[] getRandomBytes(int len) { @@ -151,68 +150,78 @@ private XContentBuilder createMappingSource(String fieldType) throws IOException public void testCustomScriptBoost() throws Exception { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject()) .get(); flush(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).endObject()) .get(); flush(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject()) .get(); refresh(); logger.info("running doc['num1'].value > 1"); - SearchResponse response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap())) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); - 
assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("3")); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap())) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); Map<String, Object> params = new HashMap<>(); params.put("param1", 2); logger.info("running doc['num1'].value > param1"); - response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("3")); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); - + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("3")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); params = new HashMap<>(); params.put("param1", -1); logger.info("running doc['num1'].value > param1"); - response = prepareSearch().setQuery( - scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) - ) - .addSort("num1", SortOrder.ASC) - .addScriptField("sNum1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap())) - .get(); - - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + assertResponse( + prepareSearch().setQuery( + scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)) + ) + .addSort("num1", SortOrder.ASC) + .addScriptField( + "sNum1", + new Script(ScriptType.INLINE,
CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getFields().get("sNum1").getValues().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getFields().get("sNum1").getValues().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); + } + ); } public void testDisallowExpensiveQueries() { @@ -220,7 +229,7 @@ public void testDisallowExpensiveQueries() { assertAcked(prepareCreate("test-index").setMapping("num1", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { - client().prepareIndex("test-index").setId("" + i).setSource("num1", i).get(); + prepareIndex("test-index").setId("" + i).setSource("num1", i).get(); } refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java index c63aa19beb42e..e89e51a60fa23 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/DuelScrollIT.java @@ -130,7 +130,7 @@ private TestContext create(SearchType... searchTypes) throws Exception { } for (int i = 1; i <= numDocs; i++) { - IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index").setId(String.valueOf(i)); + IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId(String.valueOf(i)); if (missingDocs.contains(i)) { indexRequestBuilder.setSource("x", "y"); } else { @@ -205,7 +205,7 @@ private int createIndex(boolean singleShard) throws Exception { IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; ++i) { - builders[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource("foo", random().nextBoolean()); + builders[i] = prepareIndex("test").setId(Integer.toString(i)).setSource("foo", random().nextBoolean()); } indexRandom(true, builders); return numDocs; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java index f94e59cbe1ab4..e8b3cfdb1768a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -73,10 +73,7 @@ public void testSimpleScrollQueryThenFetch() throws Exception { clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); } indicesAdmin().prepareRefresh().get(); @@ -128,7 +125,7 @@ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws E } else if (i > 60) { routing = "2"; } - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).setRouting(routing).get(); + 
prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).setRouting(routing).get(); } indicesAdmin().prepareRefresh().get(); @@ -186,8 +183,7 @@ public void testScrollAndUpdateIndex() throws Exception { clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 500; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("user", "kimchy") @@ -216,7 +212,7 @@ public void testScrollAndUpdateIndex() throws Exception { for (SearchHit searchHit : searchResponse.getHits().getHits()) { Map map = searchHit.getSourceAsMap(); map.put("message", "update"); - client().prepareIndex("test").setId(searchHit.getId()).setSource(map).get(); + prepareIndex("test").setId(searchHit.getId()).setSource(map).get(); } searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).get(); } while (searchResponse.getHits().getHits().length > 0); @@ -245,10 +241,7 @@ public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception { clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); } indicesAdmin().prepareRefresh().get(); @@ -363,10 +356,7 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); } indicesAdmin().prepareRefresh().get(); @@ -436,7 +426,7 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { * Tests that we use an optimization shrinking the batch to the size of the shard. Thus the Integer.MAX_VALUE window doesn't OOM us. */ public void testDeepScrollingDoesNotBlowUp() throws Exception { - client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); /* * Disable the max result window setting for this test because it'll reject the search's unreasonable batch size. We want * unreasonable batch sizes to just OOM. 
@@ -462,7 +452,7 @@ public void testDeepScrollingDoesNotBlowUp() throws Exception { } public void testThatNonExistingScrollIdReturnsCorrectException() throws Exception { - client().prepareIndex("index").setId("1").setSource("field", "value").execute().get(); + prepareIndex("index").setId("1").setSource("field", "value").execute().get(); refresh(); SearchResponse searchResponse = prepareSearch("index").setSize(1).setScroll("1m").get(); @@ -478,7 +468,7 @@ public void testStringSortMissingAscTerminates() throws Exception { assertAcked( prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping("no_field", "type=keyword", "some_field", "type=keyword") ); - client().prepareIndex("test").setId("1").setSource("some_field", "test").get(); + prepareIndex("test").setId("1").setSource("some_field", "test").get(); refresh(); SearchResponse response = prepareSearch("test") @@ -510,7 +500,7 @@ public void testStringSortMissingAscTerminates() throws Exception { public void testCloseAndReopenOrDeleteWithActiveScroll() { createIndex("test"); for (int i = 0; i < 100; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).get(); } refresh(); SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) @@ -566,10 +556,7 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { public void testInvalidScrollKeepAlive() throws IOException { createIndex("test"); for (int i = 0; i < 2; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); } refresh(); updateClusterSettings(Settings.builder().put("search.default_keep_alive", "5m").put("search.max_keep_alive", "5m")); @@ -612,9 +599,9 @@ public void testScrollRewrittenToMatchNoDocs() { {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}} """) ); - client().prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); - client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); - client().prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); + prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); + prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); + prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); indicesAdmin().prepareRefresh("test").get(); SearchResponse resp = null; try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java index 96c007e05e414..23a38c0608490 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -53,7 +53,7 @@ public void testScanScrollWithShardExceptions() throws Exception { List<IndexRequestBuilder> writes = new ArrayList<>(); for (int i = 0; i < 100; i++) { - writes.add(client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", i).endObject())); + writes.add(prepareIndex("test").setSource(jsonBuilder().startObject().field("field", i).endObject())); } indexRandom(false,
writes); refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index 3ac8b103ce910..6219c1b72253a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -12,15 +12,15 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Randomness; @@ -63,7 +63,7 @@ public class SearchAfterIT extends ESIntegTestCase { public void testsShouldFail() throws Exception { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); ensureGreen(); - indexRandom(true, client().prepareIndex("test").setId("0").setSource("field1", 0, "field2", "toto")); + indexRandom(true, prepareIndex("test").setId("0").setSource("field1", 0, "field2", "toto")); { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, @@ -154,8 +154,8 @@ public void testWithNullStrings() throws InterruptedException { ensureGreen(); indexRandom( true, - client().prepareIndex("test").setId("0").setSource("field1", 0), - client().prepareIndex("test").setId("1").setSource("field1", 100, "field2", "toto") + prepareIndex("test").setId("0").setSource("field1", 0), + prepareIndex("test").setId("1").setSource("field1", 100, "field2", "toto") ); SearchResponse searchResponse = prepareSearch("test").addSort("field1", SortOrder.ASC) .addSort("field2", SortOrder.ASC) @@ -314,7 +314,7 @@ private void assertSearchFromWithSortValues(String indexName, List<List<Object>> documents) { builder.field("field" + Integer.toString(j), documents.get(i).get(j)); } builder.endObject(); - requests.add(client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource(builder)); + requests.add(prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource(builder)); } indexRandom(true, requests); } @@ -456,7 +456,7 @@ public void testScrollAndSearchAfterWithBigIndex() { String pitID; { OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); - pitID = client().execute(OpenPointInTimeAction.INSTANCE, openPITRequest).actionGet().getPointInTimeId(); + pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); SearchRequest searchRequest = new SearchRequest("test").source( new SearchSourceBuilder().pointInTimeBuilder(new
PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) .sort("timestamp") @@ -483,14 +483,14 @@ public void testScrollAndSearchAfterWithBigIndex() { } while (resp.getHits().getHits().length > 0); assertThat(foundHits, equalTo(timestamps.size())); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitID)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitID)).actionGet(); } } // search_after without sort with point in time { OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); - pitID = client().execute(OpenPointInTimeAction.INSTANCE, openPITRequest).actionGet().getPointInTimeId(); + pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); SearchRequest searchRequest = new SearchRequest("test").source( new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) .sort(SortBuilders.pitTiebreaker()) @@ -517,7 +517,7 @@ public void testScrollAndSearchAfterWithBigIndex() { Collections.sort(foundSeqNos); assertThat(foundSeqNos, equalTo(timestamps)); } finally { - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitID)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitID)).actionGet(); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 61490cac43e45..f47303b83b6e3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -47,7 +46,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -76,12 +76,12 @@ public void testSearchRandomPreference() throws InterruptedException, ExecutionE createIndex("test"); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field", "value"), - client().prepareIndex("test").setId("2").setSource("field", "value"), - client().prepareIndex("test").setId("3").setSource("field", "value"), - client().prepareIndex("test").setId("4").setSource("field", "value"), - client().prepareIndex("test").setId("5").setSource("field", "value"), - 
client().prepareIndex("test").setId("6").setSource("field", "value") + prepareIndex("test").setId("1").setSource("field", "value"), + prepareIndex("test").setId("2").setSource("field", "value"), + prepareIndex("test").setId("3").setSource("field", "value"), + prepareIndex("test").setId("4").setSource("field", "value"), + prepareIndex("test").setId("5").setSource("field", "value"), + prepareIndex("test").setId("6").setSource("field", "value") ); int iters = scaledRandomIntBetween(10, 20); @@ -117,7 +117,7 @@ public void testSimpleIp() throws Exception { ) .get(); - client().prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefreshPolicy(IMMEDIATE).get(); assertHitCount( prepareSearch().setQuery(boolQuery().must(rangeQuery("from").lte("192.168.0.7")).must(rangeQuery("to").gte("192.168.0.7"))), 1L @@ -143,11 +143,11 @@ public void testIpCidr() throws Exception { .get(); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); - client().prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); - client().prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); - client().prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); - client().prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); + prepareIndex("test").setId("1").setSource("ip", "192.168.0.1").get(); + prepareIndex("test").setId("2").setSource("ip", "192.168.0.2").get(); + prepareIndex("test").setId("3").setSource("ip", "192.168.0.3").get(); + prepareIndex("test").setId("4").setSource("ip", "192.168.1.4").get(); + prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); refresh(); assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))), 1L); @@ -171,7 +171,7 @@ public void testIpCidr() throws Exception { public void testSimpleId() { createIndex("test"); - client().prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); // id is not indexed, but lets see that we automatically convert to assertHitCount(prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")), 1L); assertHitCount(prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")), 1L); @@ -179,9 +179,9 @@ public void testSimpleId() { public void testSimpleDateRange() throws Exception { createIndex("test"); - client().prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); - client().prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); - client().prepareIndex("test").setId("3").setSource("field", "1967-01-01T00:00").get(); + prepareIndex("test").setId("1").setSource("field", "2010-01-05T02:00").get(); + prepareIndex("test").setId("2").setSource("field", "2010-01-06T02:00").get(); + prepareIndex("test").setId("3").setSource("field", "1967-01-01T00:00").get(); ensureGreen(); refresh(); assertHitCountAndNoFailures( @@ -210,12 +210,12 @@ public void testSimpleDateRange() throws Exception { assertHitCountAndNoFailures(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt("1000")), 3L); // a numeric value of 1000 should be parsed as 1000 millis since epoch and return only docs after 1970 - SearchResponse searchResponse = 
prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt(1000)).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2L); - String[] expectedIds = new String[] { "1", "2" }; - assertThat(searchResponse.getHits().getHits()[0].getId(), is(oneOf(expectedIds))); - assertThat(searchResponse.getHits().getHits()[1].getId(), is(oneOf(expectedIds))); + assertNoFailuresAndResponse(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt(1000)), response -> { + assertHitCount(response, 2L); + String[] expectedIds = new String[] { "1", "2" }; + assertThat(response.getHits().getHits()[0].getId(), is(oneOf(expectedIds))); + assertThat(response.getHits().getHits()[1].getId(), is(oneOf(expectedIds))); + }); } public void testRangeQueryKeyword() throws Exception { @@ -223,10 +223,10 @@ public void testRangeQueryKeyword() throws Exception { indicesAdmin().preparePutMapping("test").setSource("field", "type=keyword").get(); - client().prepareIndex("test").setId("0").setSource("field", "").get(); - client().prepareIndex("test").setId("1").setSource("field", "A").get(); - client().prepareIndex("test").setId("2").setSource("field", "B").get(); - client().prepareIndex("test").setId("3").setSource("field", "C").get(); + prepareIndex("test").setId("0").setSource("field", "").get(); + prepareIndex("test").setId("1").setSource("field", "A").get(); + prepareIndex("test").setId("2").setSource("field", "B").get(); + prepareIndex("test").setId("3").setSource("field", "C").get(); ensureGreen(); refresh(); @@ -248,24 +248,30 @@ public void testSimpleTerminateAfterCount() throws Exception { for (int i = 1; i <= max; i++) { String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); + docbuilders.add(prepareIndex("test").setId(id).setSource("field", i)); } indexRandom(true, docbuilders); ensureGreen(); refresh(); - SearchResponse searchResponse; for (int i = 1; i < max; i++) { - searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(i).get(); - assertHitCount(searchResponse, i); - assertTrue(searchResponse.isTerminatedEarly()); + final int finalI = i; + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(i), + response -> { + assertHitCount(response, finalI); + assertTrue(response.isTerminatedEarly()); + } + ); } - - searchResponse = prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(2 * max).get(); - - assertHitCount(searchResponse, max); - assertFalse(searchResponse.isTerminatedEarly()); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(2 * max), + response -> { + assertHitCount(response, max); + assertFalse(response.isTerminatedEarly()); + } + ); } public void testSimpleIndexSortEarlyTerminate() throws Exception { @@ -276,30 +282,30 @@ public void testSimpleIndexSortEarlyTerminate() throws Exception { for (int i = max - 1; i >= 0; i--) { String id = String.valueOf(i); - docbuilders.add(client().prepareIndex("test").setId(id).setSource("rank", i)); + docbuilders.add(prepareIndex("test").setId(id).setSource("rank", i)); } indexRandom(true, docbuilders); ensureGreen(); refresh(); - SearchResponse searchResponse; for (int i = 1; i < max; i++) { - searchResponse = prepareSearch("test").addDocValueField("rank") - .setTrackTotalHits(false) - .addSort("rank", SortOrder.ASC) - .setSize(i) - 
.get(); - assertNull(searchResponse.getHits().getTotalHits()); - for (int j = 0; j < i; j++) { - assertThat(searchResponse.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); - } + final int finalI = i; + assertResponse( + prepareSearch("test").addDocValueField("rank").setTrackTotalHits(false).addSort("rank", SortOrder.ASC).setSize(i), + response -> { + assertNull(response.getHits().getTotalHits()); + for (int j = 0; j < finalI; j++) { + assertThat(response.getHits().getAt(j).field("rank").getValue(), equalTo((long) j)); + } + } + ); } } public void testInsaneFromAndSize() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertWindowFails(prepareSearch("idx").setFrom(Integer.MAX_VALUE)); assertWindowFails(prepareSearch("idx").setSize(Integer.MAX_VALUE)); @@ -307,7 +313,7 @@ public void testInsaneFromAndSize() throws Exception { public void testTooLargeFromAndSize() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertWindowFails(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); assertWindowFails(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); @@ -319,7 +325,7 @@ public void testTooLargeFromAndSize() throws Exception { public void testLargeFromAndSizeSucceeds() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10), 1); assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); @@ -335,7 +341,7 @@ public void testTooLargeFromAndSizeOkBySetting() throws Exception { Settings.builder() .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1), 1); @@ -353,7 +359,7 @@ public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2), "idx" ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1), 1); @@ -366,7 +372,7 @@ public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), 
Integer.MAX_VALUE)).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), 1); assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), 1); @@ -379,7 +385,7 @@ public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws public void testTooLargeRescoreWindow() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertRescoreWindowFails(Integer.MAX_VALUE); assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); @@ -389,7 +395,7 @@ public void testTooLargeRescoreOkBySetting() throws Exception { int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) .get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)), 1); } @@ -403,7 +409,7 @@ public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { defaultMaxWindow * 2 ) ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)), 1); } @@ -412,7 +418,7 @@ public void testTooLargeRescoreOkByDynamicSetting() throws Exception { int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); createIndex("idx"); updateIndexSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2), "idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)), 1); } @@ -425,7 +431,7 @@ public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), defaultMaxWindow * 2), "idx" ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); assertHitCount(prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)), 1); } @@ -446,8 +452,7 @@ public void testTermQueryBigInt() throws Exception { prepareCreate("idx").setMapping("field", "type=keyword").get(); ensureGreen("idx"); - client().prepareIndex("idx") - .setId("1") + prepareIndex("idx").setId("1") .setSource("{\"field\" : 80315953321748200608 }", XContentType.JSON) .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); @@ -461,7 +466,7 @@ public void testTermQueryBigInt() throws Exception { public void testTooLongRegexInRegexpQuery() throws Exception { createIndex("idx"); - 
indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index 948b7261ded1c..527d8bed8bc68 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -10,14 +10,14 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.ClosePointInTimeAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -79,7 +79,7 @@ private void setupIndex(int numDocs, int numberOfShards) throws IOException, Exe .field("static_int", 0) .field("invalid_random_int", randomInt()) .endObject(); - requests.add(client().prepareIndex("test").setSource(builder)); + requests.add(prepareIndex("test").setSource(builder)); } indexRandom(true, requests); } @@ -197,7 +197,7 @@ public void testPointInTime() throws Exception { for (String field : new String[] { null, "random_int", "static_int" }) { // Open point-in-time reader OpenPointInTimeRequest request = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueSeconds(10)); - OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); String pointInTimeId = response.getPointInTimeId(); // Test sort on document IDs @@ -206,7 +206,7 @@ public void testPointInTime() throws Exception { assertSearchSlicesWithPointInTime(field, "random_int", pointInTimeId, max, numDocs); // Close point-in-time reader - client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pointInTimeId)).actionGet(); + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pointInTimeId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index 2926d36becb4a..2cd68398e211f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -63,6 +63,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -115,35 +117,37 @@ public void testIssue8226() { assertAcked(prepareCreate("test_" + i).addAlias(new Alias("test"))); } if (i > 0) { - client().prepareIndex("test_" + i).setId("" + i).setSource("{\"entry\": " + i + "}", XContentType.JSON).get(); + prepareIndex("test_" + i).setId("" + i).setSource("{\"entry\": " + i + "}", XContentType.JSON).get(); } } refresh(); // sort DESC - SearchResponse searchResponse = prepareSearch().addSort( - new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? null : "long") - ).setSize(10).get(); - logClusterState(); - assertNoFailures(searchResponse); - - for (int j = 1; j < searchResponse.getHits().getHits().length; j++) { - Number current = (Number) searchResponse.getHits().getHits()[j].getSourceAsMap().get("entry"); - Number previous = (Number) searchResponse.getHits().getHits()[j - 1].getSourceAsMap().get("entry"); - assertThat(searchResponse.toString(), current.intValue(), lessThan(previous.intValue())); - } + assertNoFailuresAndResponse( + prepareSearch().addSort(new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? null : "long")) + .setSize(10), + response -> { + logClusterState(); + for (int j = 1; j < response.getHits().getHits().length; j++) { + Number current = (Number) response.getHits().getHits()[j].getSourceAsMap().get("entry"); + Number previous = (Number) response.getHits().getHits()[j - 1].getSourceAsMap().get("entry"); + assertThat(response.toString(), current.intValue(), lessThan(previous.intValue())); + } + } + ); // sort ASC - searchResponse = prepareSearch().addSort( - new FieldSortBuilder("entry").order(SortOrder.ASC).unmappedType(useMapping ? null : "long") - ).setSize(10).get(); - logClusterState(); - assertNoFailures(searchResponse); - - for (int j = 1; j < searchResponse.getHits().getHits().length; j++) { - Number current = (Number) searchResponse.getHits().getHits()[j].getSourceAsMap().get("entry"); - Number previous = (Number) searchResponse.getHits().getHits()[j - 1].getSourceAsMap().get("entry"); - assertThat(searchResponse.toString(), current.intValue(), greaterThan(previous.intValue())); - } + assertNoFailuresAndResponse( + prepareSearch().addSort(new FieldSortBuilder("entry").order(SortOrder.ASC).unmappedType(useMapping ? 
null : "long")) + .setSize(10), + response -> { + logClusterState(); + for (int j = 1; j < response.getHits().getHits().length; j++) { + Number current = (Number) response.getHits().getHits()[j].getSourceAsMap().get("entry"); + Number previous = (Number) response.getHits().getHits()[j - 1].getSourceAsMap().get("entry"); + assertThat(response.toString(), current.intValue(), greaterThan(previous.intValue())); + } + } + ); } public void testIssue6614() throws ExecutionException, InterruptedException { @@ -159,46 +163,52 @@ public void testIssue6614() throws ExecutionException, InterruptedException { final int numDocs = randomIntBetween(1, 23); // hour of the day for (int j = 0; j < numDocs; j++) { builders.add( - client().prepareIndex(indexId) - .setSource( - "foo", - "bar", - "timeUpdated", - "2014/07/" + Strings.format("%02d", i + 1) + " " + Strings.format("%02d", j + 1) + ":00:00" - ) + prepareIndex(indexId).setSource( + "foo", + "bar", + "timeUpdated", + "2014/07/" + Strings.format("%02d", i + 1) + " " + Strings.format("%02d", j + 1) + ":00:00" + ) ); } indexRandom(true, builders); docs += builders.size(); builders.clear(); } - SearchResponse allDocsResponse = prepareSearch().setQuery( - QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery("foo", "bar")) - .must(QueryBuilders.rangeQuery("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")) - ).addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")).setSize(docs).get(); - assertNoFailures(allDocsResponse); - - final int numiters = randomIntBetween(1, 20); - for (int i = 0; i < numiters; i++) { - SearchResponse searchResponse = prepareSearch().setQuery( + final int finalDocs = docs; + assertNoFailuresAndResponse( + prepareSearch().setQuery( QueryBuilders.boolQuery() .must(QueryBuilders.termQuery("foo", "bar")) - .must(QueryBuilders.rangeQuery("timeUpdated").gte("2014/" + Strings.format("%02d", randomIntBetween(1, 7)) + "/01")) - ) - .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) - .setSize(scaledRandomIntBetween(1, docs)) - .get(); - assertNoFailures(searchResponse); - for (int j = 0; j < searchResponse.getHits().getHits().length; j++) { - assertThat( - searchResponse.toString() + "\n vs. \n" + allDocsResponse.toString(), - searchResponse.getHits().getHits()[j].getId(), - equalTo(allDocsResponse.getHits().getHits()[j].getId()) - ); + .must(QueryBuilders.rangeQuery("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")) + ).addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")).setSize(docs), + allDocsResponse -> { + final int numiters = randomIntBetween(1, 20); + for (int i = 0; i < numiters; i++) { + assertNoFailuresAndResponse( + prepareSearch().setQuery( + QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("foo", "bar")) + .must( + QueryBuilders.rangeQuery("timeUpdated") + .gte("2014/" + Strings.format("%02d", randomIntBetween(1, 7)) + "/01") + ) + ) + .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) + .setSize(scaledRandomIntBetween(1, finalDocs)), + response -> { + for (int j = 0; j < response.getHits().getHits().length; j++) { + assertThat( + response.toString() + "\n vs. 
\n" + allDocsResponse.toString(), + response.getHits().getHits()[j].getId(), + equalTo(allDocsResponse.getHits().getHits()[j].getId()) + ); + } + } + ); + } } - } - + ); } public void testTrackScores() throws Exception { @@ -214,20 +224,19 @@ public void testTrackScores() throws Exception { ); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).get(); - - assertThat(searchResponse.getHits().getMaxScore(), equalTo(Float.NaN)); - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getScore(), equalTo(Float.NaN)); - } - + assertResponse(prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC), response -> { + assertThat(response.getHits().getMaxScore(), equalTo(Float.NaN)); + for (SearchHit hit : response.getHits()) { + assertThat(hit.getScore(), equalTo(Float.NaN)); + } + }); // now check with score tracking - searchResponse = prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).setTrackScores(true).get(); - - assertThat(searchResponse.getHits().getMaxScore(), not(equalTo(Float.NaN))); - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getScore(), not(equalTo(Float.NaN))); - } + assertResponse(prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).setTrackScores(true), response -> { + assertThat(response.getHits().getMaxScore(), not(equalTo(Float.NaN))); + for (SearchHit hit : response.getHits()) { + assertThat(hit.getScore(), not(equalTo(Float.NaN))); + } + }); } public void testRandomSorting() throws IOException, InterruptedException, ExecutionException { @@ -268,45 +277,47 @@ public void testRandomSorting() throws IOException, InterruptedException, Execut sparseBytes.put(ref, docId); } src.endObject(); - builders[i] = client().prepareIndex("test").setId(docId).setSource(src); + builders[i] = prepareIndex("test").setId(docId).setSource(src); } indexRandom(true, builders); { int size = between(1, denseBytes.size()); - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .setSize(size) - .addSort("dense_bytes", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - Set> entrySet = denseBytes.entrySet(); - Iterator> iterator = entrySet.iterator(); - for (int i = 0; i < size; i++) { - assertThat(iterator.hasNext(), equalTo(true)); - Entry next = iterator.next(); - assertThat("pos: " + i, searchResponse.getHits().getAt(i).getId(), equalTo(next.getValue())); - assertThat(searchResponse.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); - } + assertNoFailuresAndResponse( + prepareSearch("test").setQuery(matchAllQuery()).setSize(size).addSort("dense_bytes", SortOrder.ASC), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) numDocs)); + assertThat(response.getHits().getHits().length, equalTo(size)); + Set> entrySet = denseBytes.entrySet(); + Iterator> iterator = entrySet.iterator(); + for (int i = 0; i < size; i++) { + assertThat(iterator.hasNext(), equalTo(true)); + Entry next = iterator.next(); + assertThat("pos: " + i, response.getHits().getAt(i).getId(), equalTo(next.getValue())); + assertThat(response.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); + } + } + ); } if (sparseBytes.isEmpty() == false) { int size = between(1, 
sparseBytes.size()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.existsQuery("sparse_bytes")) - .setSize(size) - .addSort("sparse_bytes", SortOrder.ASC) - .get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) sparseBytes.size())); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - Set<Entry<BytesRef, String>> entrySet = sparseBytes.entrySet(); - Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator(); - for (int i = 0; i < size; i++) { - assertThat(iterator.hasNext(), equalTo(true)); - Entry<BytesRef, String> next = iterator.next(); - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(next.getValue())); - assertThat(searchResponse.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .setPostFilter(QueryBuilders.existsQuery("sparse_bytes")) + .setSize(size) + .addSort("sparse_bytes", SortOrder.ASC), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo((long) sparseBytes.size())); + assertThat(response.getHits().getHits().length, equalTo(size)); + Set<Entry<BytesRef, String>> entrySet = sparseBytes.entrySet(); + Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator(); + for (int i = 0; i < size; i++) { + assertThat(iterator.hasNext(), equalTo(true)); + Entry<BytesRef, String> next = iterator.next(); + assertThat(response.getHits().getAt(i).getId(), equalTo(next.getValue())); + assertThat(response.getHits().getAt(i).getSortValues()[0].toString(), equalTo(next.getKey().utf8ToString())); + } + } + ); } } @@ -315,142 +326,161 @@ public void test3078() { ensureGreen(); for (int i = 1; i < 101; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", Integer.toString(i)).get(); + prepareIndex("test").setId(Integer.toString(i)).setSource("field", Integer.toString(i)).get(); } refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // reindex and refresh - client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); + prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); refresh(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + assertResponse( +
prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // reindex - no refresh - client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); - - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); + + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); // force merge forceMerge(); refresh(); - client().prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - + prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); refresh(); - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)) - .get(); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + } + ); } public void testScoreSortDirection() throws Exception { createIndex("test"); ensureGreen(); - 
client().prepareIndex("test").setId("1").setSource("field", 2).get(); - client().prepareIndex("test").setId("2").setSource("field", 1).get(); - client().prepareIndex("test").setId("3").setSource("field", 0).get(); + prepareIndex("test").setId("1").setSource("field", 2).get(); + prepareIndex("test").setId("2").setSource("field", 1).get(); + prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).addSort("_score", SortOrder.DESC).get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).addSort("_score", SortOrder.DESC).get(); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) + ), + response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) + ).addSort("_score", SortOrder.DESC), + response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); + assertResponse( + prepareSearch("test").setQuery( + QueryBuilders.functionScoreQuery(matchAllQuery(), 
ScoreFunctionBuilders.fieldValueFactorFunction("field")) + ).addSort("_score", SortOrder.DESC), + response -> { + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + } + ); } public void testScoreSortDirectionWithFunctionScore() throws Exception { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", 2).get(); - client().prepareIndex("test").setId("2").setSource("field", 1).get(); - client().prepareIndex("test").setId("3").setSource("field", 0).get(); + prepareIndex("test").setId("1").setSource("field", 2).get(); + prepareIndex("test").setId("2").setSource("field", 1).get(); + prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); - SearchResponse searchResponse = prepareSearch("test").setQuery( - functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field")) - ).get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) - .addSort("_score", SortOrder.DESC) - .get(); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(0).getScore())); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getScore(), Matchers.lessThan(searchResponse.getHits().getAt(1).getScore())); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - - searchResponse = prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) - .addSort("_score", SortOrder.DESC) - .get(); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); + assertResponse(prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))), response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + }); + assertResponse( + prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) + .addSort("_score", SortOrder.DESC), + response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), 
Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); + assertResponse( + prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) + .addSort("_score", SortOrder.DESC), + response -> { + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + } + ); } public void testIssue2986() { assertAcked(indicesAdmin().prepareCreate("test").setMapping("field1", "type=keyword").get()); - client().prepareIndex("test").setId("1").setSource("{\"field1\":\"value1\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("2").setSource("{\"field1\":\"value2\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("3").setSource("{\"field1\":\"value3\"}", XContentType.JSON).get(); + prepareIndex("test").setId("1").setSource("{\"field1\":\"value1\"}", XContentType.JSON).get(); + prepareIndex("test").setId("2").setSource("{\"field1\":\"value2\"}", XContentType.JSON).get(); + prepareIndex("test").setId("3").setSource("{\"field1\":\"value3\"}", XContentType.JSON).get(); refresh(); - SearchResponse result = prepareSearch("test").setQuery(matchAllQuery()).setTrackScores(true).addSort("field1", SortOrder.ASC).get(); - - for (SearchHit hit : result.getHits()) { - assertFalse(Float.isNaN(hit.getScore())); - } + assertResponse(prepareSearch("test").setQuery(matchAllQuery()).setTrackScores(true).addSort("field1", SortOrder.ASC), response -> { + for (SearchHit hit : response.getHits()) { + assertFalse(Float.isNaN(hit.getScore())); + } + }); } public void testIssue2991() { @@ -462,35 +492,36 @@ public void testIssue2991() { } assertAcked(indicesAdmin().prepareCreate("test").setMapping("tag", "type=keyword").get()); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("tag", "alpha").get(); + prepareIndex("test").setId("1").setSource("tag", "alpha").get(); refresh(); - client().prepareIndex("test").setId("3").setSource("tag", "gamma").get(); + prepareIndex("test").setId("3").setSource("tag", "gamma").get(); refresh(); - client().prepareIndex("test").setId("4").setSource("tag", "delta").get(); + prepareIndex("test").setId("4").setSource("tag", "delta").get(); refresh(); - client().prepareIndex("test").setId("2").setSource("tag", "beta").get(); + prepareIndex("test").setId("2").setSource("tag", "beta").get(); refresh(); - SearchResponse resp = prepareSearch("test").setSize(2) - .setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC)) - .get(); - assertHitCount(resp, 4); - assertThat(resp.getHits().getHits().length, equalTo(2)); - assertFirstHit(resp, hasId("1")); - assertSecondHit(resp, hasId("2")); - - resp = prepareSearch("test").setSize(2) - .setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC)) - .get(); - assertHitCount(resp, 4); - assertThat(resp.getHits().getHits().length, equalTo(2)); - assertFirstHit(resp, hasId("3")); - assertSecondHit(resp, hasId("4")); + assertResponse( + prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC)), + response -> { + assertHitCount(response, 4); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + } + ); + assertResponse( + 
prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC)), + response -> { + assertHitCount(response, 4); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertFirstHit(response, hasId("3")); + assertSecondHit(response, hasId("4")); + } + ); } } @@ -534,8 +565,7 @@ public void testSimpleSorts() throws Exception { ensureGreen(); List builders = new ArrayList<>(); for (int i = 0; i < 10; i++) { - IndexRequestBuilder builder = client().prepareIndex("test") - .setId(Integer.toString(i)) + IndexRequestBuilder builder = prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })) @@ -565,172 +595,190 @@ public void testSimpleSorts() throws Exception { refresh(); // STRING - int size = 1 + random.nextInt(10); - - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.ASC).get(); - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat( - searchResponse.getHits().getAt(i).getSortValues()[0].toString(), - equalTo(new String(new char[] { (char) (97 + i), (char) (97 + i) })) - ); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat( + response.getHits().getAt(i).getSortValues()[0].toString(), + equalTo(new String(new char[] { (char) (97 + i), (char) (97 + i) })) + ); + } + }); } - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat( - searchResponse.getHits().getAt(i).getSortValues()[0].toString(), - equalTo(new String(new char[] { (char) (97 + (9 - i)), (char) (97 + (9 - i)) })) - ); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat( + response.getHits().getAt(i).getSortValues()[0].toString(), + equalTo(new String(new char[] { (char) (97 + (9 - i)), (char) (97 + (9 - i)) })) + ); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // BYTE - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - 
assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) i)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) i)); + } + }); } - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) (9 - i))); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.DESC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).byteValue(), equalTo((byte) (9 - i))); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // SHORT - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) i)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) i)); + } + }); } - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) (9 - i))); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.DESC), response -> { + 
assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).shortValue(), equalTo((short) (9 - i))); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // INTEGER - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo(i)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo(i)); + } + assertThat(response.toString(), not(containsString("error"))); + }); } + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.DESC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo((9 - i))); + } - assertThat(searchResponse.toString(), not(containsString("error"))); - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).intValue(), equalTo((9 - i))); + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // LONG - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) i)); - } + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.ASC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; 
i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) i)); + } - assertThat(searchResponse.toString(), not(containsString("error"))); - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.DESC).get(); - assertHitCount(searchResponse, 10L); - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) (9 - i))); + assertThat(response.toString(), not(containsString("error"))); + }); + } + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.DESC), response -> { + assertHitCount(response, 10L); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).longValue(), equalTo((long) (9 - i))); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // FLOAT - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10L); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC), response -> { + assertHitCount(response, 10L); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d)); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC), response -> { + assertHitCount(response, 10); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + 
assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d)); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - // DOUBLE - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); - - assertHitCount(searchResponse, 10L); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d)); + { + int size = 1 + random.nextInt(10); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC), response -> { + assertHitCount(response, 10L); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + assertThat(((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d)); + } + assertThat(response.toString(), not(containsString("error"))); + }); } - - assertThat(searchResponse.toString(), not(containsString("error"))); - size = 1 + random.nextInt(10); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC).get(); - - assertHitCount(searchResponse, 10L); - assertThat(searchResponse.getHits().getHits().length, equalTo(size)); - for (int i = 0; i < size; i++) { - assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); - assertThat(((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d)); + { + int size = 1 + random.nextInt(10); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC), + response -> { + assertHitCount(response, 10L); + assertThat(response.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + assertThat( + ((Number) response.getHits().getAt(i).getSortValues()[0]).doubleValue(), + closeTo(0.1d * (9 - i), 0.000001d) + ); + } + } + ); } - - assertNoFailures(searchResponse); } public void testSortMissingNumbers() throws Exception { @@ -752,15 +800,13 @@ public void testSortMissingNumbers() throws Exception { ) ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("i_value", -1).field("d_value", -1.1).endObject()) .get(); - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("id", "2").endObject()).get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("id", "2").endObject()).get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource(jsonBuilder().startObject().field("id", "1").field("i_value", 2).field("d_value", 2.2).endObject()) .get(); @@ -768,37 +814,35 @@ public void testSortMissingNumbers() throws Exception { refresh(); logger.info("--> sort with no missing 
(same as missing _last)"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC)) - .get(); - assertNoFailures(searchResponse); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC)), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _last"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last")) - .get(); - assertNoFailures(searchResponse); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _first"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first")) - .get(); - assertNoFailures(searchResponse); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first")), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testSortMissingStrings() throws IOException { @@ -817,17 +861,11 @@ public void testSortMissingStrings() throws IOException { ) ); ensureGreen(); - client().prepareIndex("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("id", "1").field("value", "a").endObject()) - .get(); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("id", "1").field("value", "a").endObject()).get(); - client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("id", "2").endObject()).get(); + prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("id", "2").endObject()).get(); - 
client().prepareIndex("test") - .setId("3") - .setSource(jsonBuilder().startObject().field("id", "1").field("value", "c").endObject()) - .get(); + prepareIndex("test").setId("3").setSource(jsonBuilder().startObject().field("id", "1").field("value", "c").endObject()).get(); flush(); refresh(); @@ -840,48 +878,53 @@ public void testSortMissingStrings() throws IOException { } logger.info("--> sort with no missing (same as missing _last)"); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC)) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC)), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _last"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("3")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("2")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + } + ); logger.info("--> sort with missing _first"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); - + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + 
assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getId(), equalTo("1")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); logger.info("--> sort with missing b"); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b")) - .get(); - assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0)); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b")), + response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + ); } public void testSortMissingDates() throws IOException { @@ -902,9 +945,9 @@ public void testSortMissingDates() throws IOException { ) ); ensureGreen(); - client().prepareIndex(index).setId("1").setSource("mydate", "2021-01-01").get(); - client().prepareIndex(index).setId("2").setSource("mydate", "2021-02-01").get(); - client().prepareIndex(index).setId("3").setSource("other_field", "value").get(); + prepareIndex(index).setId("1").setSource("mydate", "2021-01-01").get(); + prepareIndex(index).setId("2").setSource("mydate", "2021-02-01").get(); + prepareIndex(index).setId("3").setSource("other_field", "value").get(); refresh(); @@ -914,24 +957,27 @@ public void testSortMissingDates() throws IOException { format = type.equals("date") ? 
"strict_date_optional_time" : "strict_date_optional_time_nanos"; } - SearchResponse searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "1", "2", "3" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "1", "2", "3" }) + ); - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "1", "2" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "3", "1", "2" }) + ); - searchResponse = prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format)) - .get(); - assertHitsInOrder(searchResponse, new String[] { "2", "1", "3" }); + assertResponse( + prepareSearch(index).addSort(SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format)), + response -> assertHitsInOrder(response, new String[] { "2", "1", "3" }) + ); - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format) - ).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "2", "1" }); + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format) + ), + response -> assertHitsInOrder(response, new String[] { "3", "2", "1" }) + ); } } } @@ -960,12 +1006,12 @@ public void testSortMissingDatesMixedTypes() throws IOException { } ensureGreen(); - client().prepareIndex("test_date").setId("1").setSource("mydate", "2021-01-01").get(); - client().prepareIndex("test_date").setId("2").setSource("mydate", "2021-02-01").get(); - client().prepareIndex("test_date").setId("3").setSource("other_field", 1).get(); - client().prepareIndex("test_date_nanos").setId("4").setSource("mydate", "2021-03-01").get(); - client().prepareIndex("test_date_nanos").setId("5").setSource("mydate", "2021-04-01").get(); - client().prepareIndex("test_date_nanos").setId("6").setSource("other_field", 2).get(); + prepareIndex("test_date").setId("1").setSource("mydate", "2021-01-01").get(); + prepareIndex("test_date").setId("2").setSource("mydate", "2021-02-01").get(); + prepareIndex("test_date").setId("3").setSource("other_field", 1).get(); + prepareIndex("test_date_nanos").setId("4").setSource("mydate", "2021-03-01").get(); + prepareIndex("test_date_nanos").setId("5").setSource("mydate", "2021-04-01").get(); + prepareIndex("test_date_nanos").setId("6").setSource("other_field", 2).get(); refresh(); for (boolean withFormat : List.of(true, false)) { @@ -975,25 +1021,33 @@ public void testSortMissingDatesMixedTypes() throws IOException { } String index = "test*"; - SearchResponse searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "1", "2", "4", "5", "3", "6" }); - - searchResponse = prepareSearch(index).addSort( - 
SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "6", "1", "2", "4", "5" }); - - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "5", "4", "2", "1", "3", "6" }); - - searchResponse = prepareSearch(index).addSort( - SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format).setNumericType("date_nanos") - ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)).get(); - assertHitsInOrder(searchResponse, new String[] { "3", "6", "5", "4", "2", "1" }); + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.ASC).setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "1", "2", "4", "5", "3", "6" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.ASC).missing("_first").setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "3", "6", "1", "2", "4", "5" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "5", "4", "2", "1", "3", "6" }) + ); + + assertResponse( + prepareSearch(index).addSort( + SortBuilders.fieldSort("mydate").order(SortOrder.DESC).missing("_first").setFormat(format).setNumericType("date_nanos") + ).addSort(SortBuilders.fieldSort("other_field").order(SortOrder.ASC)), + response -> assertHitsInOrder(response, new String[] { "3", "6", "5", "4", "2", "1" }) + ); } } @@ -1010,15 +1064,16 @@ private void assertHitsInOrder(SearchResponse response, String[] expectedIds) { public void testIgnoreUnmapped() throws Exception { createIndex("test"); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource(jsonBuilder().startObject().field("id", "1").field("i_value", -1).field("d_value", -1.1).endObject()) .get(); logger.info("--> sort with an unmapped field, verify it fails"); try { - SearchResponse result = prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("kkk")).get(); - assertThat("Expected exception but returned with", result, nullValue()); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("kkk")), + response -> assertThat("Expected exception but returned with", response, nullValue()) + ); } catch (SearchPhaseExecutionException e) { // we check that it's a parse failure rather than a different shard failure for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { @@ -1084,8 +1139,7 @@ public void testSortMVField() throws Exception { ); ensureGreen(); - client().prepareIndex("test") - .setId(Integer.toString(1)) + prepareIndex("test").setId(Integer.toString(1)) .setSource( jsonBuilder().startObject() .array("long_values", 1L, 5L, 
                    .array("long_values", 1L, 5L, 10L, 8L)
@@ -1098,8 +1152,7 @@ public void testSortMVField() throws Exception {
                    .endObject()
            )
            .get();
-        client().prepareIndex("test")
-            .setId(Integer.toString(2))
+        prepareIndex("test").setId(Integer.toString(2))
            .setSource(
                jsonBuilder().startObject()
                    .array("long_values", 11L, 15L, 20L, 7L)
@@ -1112,8 +1165,7 @@ public void testSortMVField() throws Exception {
                    .endObject()
            )
            .get();
-        client().prepareIndex("test")
-            .setId(Integer.toString(3))
+        prepareIndex("test").setId(Integer.toString(3))
            .setSource(
                jsonBuilder().startObject()
                    .array("long_values", 2L, 1L, 3L, -4L)
@@ -1129,252 +1181,243 @@ public void testSortMVField() throws Exception {
 
        refresh();
 
-        SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(-4L));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(1L));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(7L));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(20L));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(10L));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(3L));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .setSize(10)
-            .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.SUM))
-            .get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(53L));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(24L));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .setSize(10)
-            .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.AVG))
-            .get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(6L));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(1L));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery())
-            .setSize(10)
-            .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.MEDIAN))
-            .get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(7L));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(-4d));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(1d));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(7d));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(20d));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(10d));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(3d));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.ASC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
-        assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("!4"));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("01"));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
-        assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("07"));
-
-        searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.DESC).get();
-
-        assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(3));
-
-        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
-        assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20"));
-
-        assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
-        assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10"));
-
-        assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
-        assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03"));
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(-4L));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(1L));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(7L));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("long_values", SortOrder.DESC), response -> {
+
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(20L));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(10L));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(3L));
+        });
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setSize(10)
+                .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.SUM)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getHits().length, equalTo(3));
+
+                assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(53L));
+
+                assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(24L));
+
+                assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L));
+            }
+        );
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setSize(10)
+                .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.AVG)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getHits().length, equalTo(3));
+
+                assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L));
+
+                assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(6L));
+
+                assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(1L));
+            }
+        );
+        assertResponse(
+            prepareSearch().setQuery(matchAllQuery())
+                .setSize(10)
+                .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.MEDIAN)),
+            response -> {
+                assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+                assertThat(response.getHits().getHits().length, equalTo(3));
+
+                assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).longValue(), equalTo(13L));
+
+                assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).longValue(), equalTo(7L));
+
+                assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L));
+            }
+        );
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.DESC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10));
+
+            assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3));
+        });
+        assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.ASC), response -> {
+            assertThat(response.getHits().getTotalHits().value, equalTo(3L));
+            assertThat(response.getHits().getHits().length, equalTo(3));
+
+            assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3)));
+            assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f));
+
+            assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1)));
+            assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f));
+
+
assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(-4d)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(1d)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(7d)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(20d)); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(10d)); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(3d)); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.ASC), response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("!4")); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("01")); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("07")); + }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("string_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getTotalHits().value, 
equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); + + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); + + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); + }); } public void testSortOnRareField() throws IOException { @@ -1393,86 +1436,80 @@ public void testSortOnRareField() throws IOException { ) ); ensureGreen(); - client().prepareIndex("test") - .setId(Integer.toString(1)) + prepareIndex("test").setId(Integer.toString(1)) .setSource(jsonBuilder().startObject().array("string_values", "01", "05", "10", "08").endObject()) .get(); refresh(); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("10")); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); - client().prepareIndex("test") - .setId(Integer.toString(2)) + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("10")); + }); + prepareIndex("test").setId(Integer.toString(2)) .setSource(jsonBuilder().startObject().array("string_values", "11", "15", "20", "07").endObject()) .get(); for (int i = 0; i < 15; i++) { - client().prepareIndex("test") - .setId(Integer.toString(300 + i)) + prepareIndex("test").setId(Integer.toString(300 + i)) .setSource(jsonBuilder().startObject().array("some_other_field", "foobar").endObject()) .get(); } refresh(); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort("string_values", SortOrder.DESC).get(); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort("string_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getHits().length, equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); - - client().prepareIndex("test") - .setId(Integer.toString(3)) + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); + }); + prepareIndex("test").setId(Integer.toString(3)) .setSource(jsonBuilder().startObject().array("string_values", "02", "01", "03", "!4").endObject()) .get(); for (int i = 0; i < 15; i++) { - client().prepareIndex("test") - .setId(Integer.toString(300 + i)) + 
prepareIndex("test").setId(Integer.toString(300 + i)) .setSource(jsonBuilder().startObject().array("some_other_field", "foobar").endObject()) .get(); } refresh(); - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get(); - - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); + }); for (int i = 0; i < 15; i++) { - client().prepareIndex("test") - .setId(Integer.toString(300 + i)) + prepareIndex("test").setId(Integer.toString(300 + i)) .setSource(jsonBuilder().startObject().array("some_other_field", "foobar").endObject()) .get(); refresh(); } - searchResponse = prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC).get(); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { - assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(searchResponse.getHits().getAt(0).getSortValues()[0], equalTo("20")); + assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(searchResponse.getHits().getAt(1).getSortValues()[0], equalTo("10")); + assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); + assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); + }); } public void testSortMetaField() throws Exception { @@ -1483,25 +1520,25 @@ public void testSortMetaField() throws Exception { final int numDocs = randomIntBetween(10, 20); IndexRequestBuilder[] indexReqs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; ++i) { - indexReqs[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource(); + indexReqs[i] = 
prepareIndex("test").setId(Integer.toString(i)).setSource(); } indexRandom(true, indexReqs); SortOrder order = randomFrom(SortOrder.values()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(randomIntBetween(1, numDocs + 5)) - .addSort("_id", order) - .get(); - assertNoFailures(searchResponse); - SearchHit[] hits = searchResponse.getHits().getHits(); - BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM; - for (int i = 0; i < hits.length; ++i) { - String idString = hits[i].getId(); - final BytesRef id = new BytesRef(idString); - assertEquals(idString, hits[i].getSortValues()[0]); - assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id)); - previous = id; - } + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(randomIntBetween(1, numDocs + 5)).addSort("_id", order), + response -> { + SearchHit[] hits = response.getHits().getHits(); + BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM; + for (int i = 0; i < hits.length; ++i) { + String idString = hits[i].getId(); + final BytesRef id = new BytesRef(idString); + assertEquals(idString, hits[i].getSortValues()[0]); + assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id)); + previous = id; + } + } + ); // assertWarnings(ID_FIELD_DATA_DEPRECATION_MESSAGE); } finally { // unset cluster setting @@ -1554,8 +1591,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution ); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .startArray("nested") @@ -1569,8 +1605,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution .endObject() ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .startArray("nested") @@ -1588,59 +1623,64 @@ public void testNestedSort() throws IOException, InterruptedException, Execution // We sort on nested field - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)) - .get(); - assertNoFailures(searchResponse); - SearchHit[] hits = searchResponse.getHits().getHits(); - assertThat(hits.length, is(2)); - assertThat(hits[0].getSortValues().length, is(1)); - assertThat(hits[1].getSortValues().length, is(1)); - assertThat(hits[0].getSortValues()[0], is("cba")); - assertThat(hits[1].getSortValues()[0], is("bar")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits.length, is(2)); + assertThat(hits[0].getSortValues().length, is(1)); + assertThat(hits[1].getSortValues().length, is(1)); + assertThat(hits[0].getSortValues()[0], is("cba")); + assertThat(hits[1].getSortValues()[0], is("bar")); + } + ); // We sort on nested fields with max_children limit - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("nested.foo").setNestedSort(new NestedSortBuilder("nested").setMaxChildren(1)).order(SortOrder.DESC) - ) - .get(); - assertNoFailures(searchResponse); - hits = searchResponse.getHits().getHits(); - assertThat(hits.length, is(2)); - 
assertThat(hits[0].getSortValues().length, is(1)); - assertThat(hits[1].getSortValues().length, is(1)); - assertThat(hits[0].getSortValues()[0], is("bar")); - assertThat(hits[1].getSortValues()[0], is("abc")); - - { - SearchPhaseExecutionException exc = expectThrows( - SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("nested.bar.foo") - .setNestedSort( - new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.bar").setMaxChildren(1)) + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("nested.foo") + .setNestedSort(new NestedSortBuilder("nested").setMaxChildren(1)) + .order(SortOrder.DESC) + ), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits.length, is(2)); + assertThat(hits[0].getSortValues().length, is(1)); + assertThat(hits[1].getSortValues().length, is(1)); + assertThat(hits[0].getSortValues()[0], is("bar")); + assertThat(hits[1].getSortValues()[0], is("abc")); + + { + SearchPhaseExecutionException exc = expectThrows( + SearchPhaseExecutionException.class, + () -> prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("nested.bar.foo") + .setNestedSort( + new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.bar").setMaxChildren(1)) + ) + .order(SortOrder.DESC) ) - .order(SortOrder.DESC) - ) - .get() - ); - assertThat(exc.toString(), containsString("max_children is only supported on top level of nested sort")); - } - + .get() + ); + assertThat(exc.toString(), containsString("max_children is only supported on top level of nested sort")); + } + } + ); // We sort on nested sub field - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("nested.foo.sub").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)) - .get(); - assertNoFailures(searchResponse); - hits = searchResponse.getHits().getHits(); - assertThat(hits.length, is(2)); - assertThat(hits[0].getSortValues().length, is(1)); - assertThat(hits[1].getSortValues().length, is(1)); - assertThat(hits[0].getSortValues()[0], is("cba bca")); - assertThat(hits[1].getSortValues()[0], is("bar bar")); - + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo.sub").setNestedSort(new NestedSortBuilder("nested")).order(SortOrder.DESC)), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits.length, is(2)); + assertThat(hits[0].getSortValues().length, is(1)); + assertThat(hits[1].getSortValues().length, is(1)); + assertThat(hits[0].getSortValues()[0], is("cba bca")); + assertThat(hits[1].getSortValues()[0], is("bar bar")); + } + ); // missing nested path SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, @@ -1664,7 +1704,7 @@ public void testSortDuelBetweenSingleShardAndMultiShardIndex() throws Exception for (String index : new String[] { "test1", "test2" }) { List docs = new ArrayList<>(); for (int i = 0; i < 256; i++) { - docs.add(client().prepareIndex(index).setId(Integer.toString(i)).setSource(sortField, i)); + docs.add(prepareIndex(index).setId(Integer.toString(i)).setSource(sortField, i)); } indexRandom(true, docs); } @@ -1673,20 +1713,26 @@ public void testSortDuelBetweenSingleShardAndMultiShardIndex() throws Exception SortOrder order = randomBoolean() ? 
SortOrder.ASC : SortOrder.DESC; int from = between(0, 256); int size = between(0, 256); - SearchResponse multiShardResponse = prepareSearch("test1").setFrom(from).setSize(size).addSort(sortField, order).get(); - assertNoFailures(multiShardResponse); - SearchResponse singleShardResponse = prepareSearch("test2").setFrom(from).setSize(size).addSort(sortField, order).get(); - assertNoFailures(singleShardResponse); - - assertThat(multiShardResponse.getHits().getTotalHits().value, equalTo(singleShardResponse.getHits().getTotalHits().value)); - assertThat(multiShardResponse.getHits().getHits().length, equalTo(singleShardResponse.getHits().getHits().length)); - for (int i = 0; i < multiShardResponse.getHits().getHits().length; i++) { - assertThat( - multiShardResponse.getHits().getAt(i).getSortValues()[0], - equalTo(singleShardResponse.getHits().getAt(i).getSortValues()[0]) - ); - assertThat(multiShardResponse.getHits().getAt(i).getId(), equalTo(singleShardResponse.getHits().getAt(i).getId())); - } + assertNoFailuresAndResponse( + prepareSearch("test1").setFrom(from).setSize(size).addSort(sortField, order), + multiShardResponse -> assertNoFailuresAndResponse( + prepareSearch("test2").setFrom(from).setSize(size).addSort(sortField, order), + singleShardResponse -> { + assertThat( + multiShardResponse.getHits().getTotalHits().value, + equalTo(singleShardResponse.getHits().getTotalHits().value) + ); + assertThat(multiShardResponse.getHits().getHits().length, equalTo(singleShardResponse.getHits().getHits().length)); + for (int i = 0; i < multiShardResponse.getHits().getHits().length; i++) { + assertThat( + multiShardResponse.getHits().getAt(i).getSortValues()[0], + equalTo(singleShardResponse.getHits().getAt(i).getSortValues()[0]) + ); + assertThat(multiShardResponse.getHits().getAt(i).getId(), equalTo(singleShardResponse.getHits().getAt(i).getId())); + } + } + ) + ); } public void testCustomFormat() throws Exception { @@ -1696,21 +1742,23 @@ public void testCustomFormat() throws Exception { assertAcked(prepareCreate("test").setMapping("ip", "type=ip")); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("test").setId("2").setSource("ip", "2001:db8::ff00:42:8329") + prepareIndex("test").setId("1").setSource("ip", "192.168.1.7"), + prepareIndex("test").setId("2").setSource("ip", "2001:db8::ff00:42:8329") ); - SearchResponse response = prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).get(); - assertNoFailures(response); - assertEquals(2, response.getHits().getTotalHits().value); - assertArrayEquals(new String[] { "192.168.1.7" }, response.getHits().getAt(0).getSortValues()); - assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(1).getSortValues()); - - response = prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).searchAfter(new Object[] { "192.168.1.7" }).get(); - assertNoFailures(response); - assertEquals(2, response.getHits().getTotalHits().value); - assertEquals(1, response.getHits().getHits().length); - assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(0).getSortValues()); + assertNoFailuresAndResponse(prepareSearch("test").addSort(SortBuilders.fieldSort("ip")), response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertArrayEquals(new String[] { "192.168.1.7" }, response.getHits().getAt(0).getSortValues()); + assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(1).getSortValues()); + }); + 
assertNoFailuresAndResponse( + prepareSearch("test").addSort(SortBuilders.fieldSort("ip")).searchAfter(new Object[] { "192.168.1.7" }), + response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertEquals(1, response.getHits().getHits().length); + assertArrayEquals(new String[] { "2001:db8::ff00:42:8329" }, response.getHits().getAt(0).getSortValues()); + } + ); } public void testScriptFieldSort() throws Exception { @@ -1720,7 +1768,7 @@ public void testScriptFieldSort() throws Exception { IndexRequestBuilder[] indexReqs = new IndexRequestBuilder[numDocs]; List keywords = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { - indexReqs[i] = client().prepareIndex("test").setSource("number", i, "keyword", Integer.toString(i)); + indexReqs[i] = prepareIndex("test").setSource("number", i, "keyword", Integer.toString(i)); keywords.add(Integer.toString(i)); } Collections.sort(keywords); @@ -1728,34 +1776,38 @@ public void testScriptFieldSort() throws Exception { { Script script = new Script(ScriptType.INLINE, NAME, "doc['number'].value", Collections.emptyMap()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(randomIntBetween(1, numDocs + 5)) - .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.NUMBER)) - .addSort(SortBuilders.scoreSort()) - .get(); - - double expectedValue = 0; - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getSortValues().length, equalTo(2)); - assertThat(hit.getSortValues()[0], equalTo(expectedValue++)); - assertThat(hit.getSortValues()[1], equalTo(1f)); - } + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(randomIntBetween(1, numDocs + 5)) + .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.NUMBER)) + .addSort(SortBuilders.scoreSort()), + response -> { + double expectedValue = 0; + for (SearchHit hit : response.getHits()) { + assertThat(hit.getSortValues().length, equalTo(2)); + assertThat(hit.getSortValues()[0], equalTo(expectedValue++)); + assertThat(hit.getSortValues()[1], equalTo(1f)); + } + } + ); } { Script script = new Script(ScriptType.INLINE, NAME, "doc['keyword'].value", Collections.emptyMap()); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(randomIntBetween(1, numDocs + 5)) - .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.STRING)) - .addSort(SortBuilders.scoreSort()) - .get(); - - int expectedValue = 0; - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getSortValues().length, equalTo(2)); - assertThat(hit.getSortValues()[0], equalTo(keywords.get(expectedValue++))); - assertThat(hit.getSortValues()[1], equalTo(1f)); - } + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(randomIntBetween(1, numDocs + 5)) + .addSort(SortBuilders.scriptSort(script, ScriptSortBuilder.ScriptSortType.STRING)) + .addSort(SortBuilders.scoreSort()), + response -> { + int expectedValue = 0; + for (SearchHit hit : response.getHits()) { + assertThat(hit.getSortValues().length, equalTo(2)); + assertThat(hit.getSortValues()[0], equalTo(keywords.get(expectedValue++))); + assertThat(hit.getSortValues()[1], equalTo(1f)); + } + } + ); } } @@ -1767,21 +1819,22 @@ public void testFieldAlias() throws Exception { ensureGreen("old_index", "new_index"); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("old_index").setSource("distance", 42.0)); - builders.add(client().prepareIndex("old_index").setSource("distance", 50.5)); - 
builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); + builders.add(prepareIndex("old_index").setSource("distance", 42.0)); + builders.add(prepareIndex("old_index").setSource("distance", 50.5)); + builders.add(prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(builders.size()) - .addSort(SortBuilders.fieldSort("route_length_miles")) - .get(); - SearchHits hits = response.getHits(); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(builders.size()).addSort(SortBuilders.fieldSort("route_length_miles")), + response -> { + SearchHits hits = response.getHits(); - assertEquals(3, hits.getHits().length); - assertEquals(42.0, hits.getAt(0).getSortValues()[0]); - assertEquals(50.5, hits.getAt(1).getSortValues()[0]); - assertEquals(100.2, hits.getAt(2).getSortValues()[0]); + assertEquals(3, hits.getHits().length); + assertEquals(42.0, hits.getAt(0).getSortValues()[0]); + assertEquals(50.5, hits.getAt(1).getSortValues()[0]); + assertEquals(100.2, hits.getAt(2).getSortValues()[0]); + } + ); } public void testFieldAliasesWithMissingValues() throws Exception { @@ -1792,21 +1845,24 @@ public void testFieldAliasesWithMissingValues() throws Exception { ensureGreen("old_index", "new_index"); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("old_index").setSource("distance", 42.0)); - builders.add(client().prepareIndex("old_index").setSource(Collections.emptyMap())); - builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); + builders.add(prepareIndex("old_index").setSource("distance", 42.0)); + builders.add(prepareIndex("old_index").setSource(Collections.emptyMap())); + builders.add(prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(builders.size()) - .addSort(SortBuilders.fieldSort("route_length_miles").missing(120.3)) - .get(); - SearchHits hits = response.getHits(); - - assertEquals(3, hits.getHits().length); - assertEquals(42.0, hits.getAt(0).getSortValues()[0]); - assertEquals(100.2, hits.getAt(1).getSortValues()[0]); - assertEquals(120.3, hits.getAt(2).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(builders.size()) + .addSort(SortBuilders.fieldSort("route_length_miles").missing(120.3)), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(3, hits.getHits().length); + assertEquals(42.0, hits.getAt(0).getSortValues()[0]); + assertEquals(100.2, hits.getAt(1).getSortValues()[0]); + assertEquals(120.3, hits.getAt(2).getSortValues()[0]); + } + ); } public void testCastNumericType() throws Exception { @@ -1816,40 +1872,46 @@ public void testCastNumericType() throws Exception { ensureGreen("index_double", "index_long", "index_float"); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("index_double").setSource("field", 12.6)); - builders.add(client().prepareIndex("index_long").setSource("field", 12)); - builders.add(client().prepareIndex("index_float").setSource("field", 12.1)); + builders.add(prepareIndex("index_double").setSource("field", 12.6)); + builders.add(prepareIndex("index_long").setSource("field", 12)); + builders.add(prepareIndex("index_float").setSource("field", 12.1)); indexRandom(true, true, builders); { - SearchResponse response = 
prepareSearch().setQuery(matchAllQuery()) - .setSize(builders.size()) - .addSort(SortBuilders.fieldSort("field").setNumericType("long")) - .get(); - SearchHits hits = response.getHits(); - - assertEquals(3, hits.getHits().length); - for (int i = 0; i < 3; i++) { - assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); - } - assertEquals(12L, hits.getAt(0).getSortValues()[0]); - assertEquals(12L, hits.getAt(1).getSortValues()[0]); - assertEquals(12L, hits.getAt(2).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(builders.size()) + .addSort(SortBuilders.fieldSort("field").setNumericType("long")), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(3, hits.getHits().length); + for (int i = 0; i < 3; i++) { + assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); + } + assertEquals(12L, hits.getAt(0).getSortValues()[0]); + assertEquals(12L, hits.getAt(1).getSortValues()[0]); + assertEquals(12L, hits.getAt(2).getSortValues()[0]); + } + ); } { - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(builders.size()) - .addSort(SortBuilders.fieldSort("field").setNumericType("double")) - .get(); - SearchHits hits = response.getHits(); - assertEquals(3, hits.getHits().length); - for (int i = 0; i < 3; i++) { - assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Double.class)); - } - assertEquals(12D, hits.getAt(0).getSortValues()[0]); - assertEquals(12.1D, (double) hits.getAt(1).getSortValues()[0], 0.001f); - assertEquals(12.6D, hits.getAt(2).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .setSize(builders.size()) + .addSort(SortBuilders.fieldSort("field").setNumericType("double")), + response -> { + SearchHits hits = response.getHits(); + assertEquals(3, hits.getHits().length); + for (int i = 0; i < 3; i++) { + assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Double.class)); + } + assertEquals(12D, hits.getAt(0).getSortValues()[0]); + assertEquals(12.1D, (double) hits.getAt(1).getSortValues()[0], 0.001f); + assertEquals(12.6D, hits.getAt(2).getSortValues()[0]); + } + ); } } @@ -1859,105 +1921,119 @@ public void testCastDate() throws Exception { ensureGreen("index_date", "index_date_nanos"); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("index_date").setSource("field", "2024-04-11T23:47:17")); - builders.add(client().prepareIndex("index_date_nanos").setSource("field", "2024-04-11T23:47:16.854775807Z")); + builders.add(prepareIndex("index_date").setSource("field", "2024-04-11T23:47:17")); + builders.add(prepareIndex("index_date_nanos").setSource("field", "2024-04-11T23:47:16.854775807Z")); indexRandom(true, true, builders); { - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(2) - .addSort(SortBuilders.fieldSort("field").setNumericType("date")) - .get(); - SearchHits hits = response.getHits(); - - assertEquals(2, hits.getHits().length); - for (int i = 0; i < 2; i++) { - assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); - } - assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]); - assertEquals(1712879237000L, hits.getAt(1).getSortValues()[0]); - - response = prepareSearch().setMaxConcurrentShardRequests(1) - .setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").setNumericType("date")) - .get(); - hits = response.getHits(); - - assertEquals(1, hits.getHits().length); - 
assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); - assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]); - - response = prepareSearch().setMaxConcurrentShardRequests(1) - .setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date")) - .get(); - hits = response.getHits(); - - assertEquals(1, hits.getHits().length); - assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); - assertEquals(1712879237000L, hits.getAt(0).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort(SortBuilders.fieldSort("field").setNumericType("date")), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(2, hits.getHits().length); + for (int i = 0; i < 2; i++) { + assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); + } + assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]); + assertEquals(1712879237000L, hits.getAt(1).getSortValues()[0]); + } + ); + assertResponse( + prepareSearch().setMaxConcurrentShardRequests(1) + .setQuery(matchAllQuery()) + .setSize(1) + .addSort(SortBuilders.fieldSort("field").setNumericType("date")), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(1, hits.getHits().length); + assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); + assertEquals(1712879236854L, hits.getAt(0).getSortValues()[0]); + } + ); + assertResponse( + prepareSearch().setMaxConcurrentShardRequests(1) + .setQuery(matchAllQuery()) + .setSize(1) + .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date")), + response -> { + SearchHits hits = response.getHits(); + + assertEquals(1, hits.getHits().length); + assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); + assertEquals(1712879237000L, hits.getAt(0).getSortValues()[0]); + } + ); } { - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(2) - .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")) - .get(); - SearchHits hits = response.getHits(); - assertEquals(2, hits.getHits().length); - for (int i = 0; i < 2; i++) { - assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); - } - assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]); - assertEquals(1712879237000000000L, hits.getAt(1).getSortValues()[0]); - - response = prepareSearch().setMaxConcurrentShardRequests(1) - .setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")) - .get(); - hits = response.getHits(); - assertEquals(1, hits.getHits().length); - assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); - assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]); - - response = prepareSearch().setMaxConcurrentShardRequests(1) - .setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date_nanos")) - .get(); - hits = response.getHits(); - assertEquals(1, hits.getHits().length); - assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); - assertEquals(1712879237000000000L, hits.getAt(0).getSortValues()[0]); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(2).addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")), + response -> { + SearchHits hits = response.getHits(); + assertEquals(2, 
hits.getHits().length); + for (int i = 0; i < 2; i++) { + assertThat(hits.getAt(i).getSortValues()[0].getClass(), equalTo(Long.class)); + } + assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]); + assertEquals(1712879237000000000L, hits.getAt(1).getSortValues()[0]); + } + ); + assertResponse( + prepareSearch().setMaxConcurrentShardRequests(1) + .setQuery(matchAllQuery()) + .setSize(1) + .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")), + response -> { + SearchHits hits = response.getHits(); + assertEquals(1, hits.getHits().length); + assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); + assertEquals(1712879236854775807L, hits.getAt(0).getSortValues()[0]); + } + ); + assertResponse( + prepareSearch().setMaxConcurrentShardRequests(1) + .setQuery(matchAllQuery()) + .setSize(1) + .addSort(SortBuilders.fieldSort("field").order(SortOrder.DESC).setNumericType("date_nanos")), + response -> { + SearchHits hits = response.getHits(); + assertEquals(1, hits.getHits().length); + assertThat(hits.getAt(0).getSortValues()[0].getClass(), equalTo(Long.class)); + assertEquals(1712879237000000000L, hits.getAt(0).getSortValues()[0]); + } + ); } { builders.clear(); - builders.add(client().prepareIndex("index_date").setSource("field", "1905-04-11T23:47:17")); + builders.add(prepareIndex("index_date").setSource("field", "1905-04-11T23:47:17")); indexRandom(true, true, builders); - SearchResponse response = prepareSearch().setQuery(matchAllQuery()) - .setSize(1) - .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")) - .get(); - assertNotNull(response.getShardFailures()); - assertThat(response.getShardFailures().length, equalTo(1)); - assertThat(response.getShardFailures()[0].toString(), containsString("are before the epoch in 1970")); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(1).addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")), + response -> { + assertNotNull(response.getShardFailures()); + assertThat(response.getShardFailures().length, equalTo(1)); + assertThat(response.getShardFailures()[0].toString(), containsString("are before the epoch in 1970")); + } + ); } { builders.clear(); - builders.add(client().prepareIndex("index_date").setSource("field", "2346-04-11T23:47:17")); + builders.add(prepareIndex("index_date").setSource("field", "2346-04-11T23:47:17")); indexRandom(true, true, builders); - SearchResponse response = prepareSearch().setQuery(QueryBuilders.rangeQuery("field").gt("1970-01-01")) - .setSize(10) - .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")) - .get(); - assertNotNull(response.getShardFailures()); - assertThat(response.getShardFailures().length, equalTo(1)); - assertThat(response.getShardFailures()[0].toString(), containsString("are after 2262")); + assertResponse( + prepareSearch().setQuery(QueryBuilders.rangeQuery("field").gt("1970-01-01")) + .setSize(10) + .addSort(SortBuilders.fieldSort("field").setNumericType("date_nanos")), + response -> { + assertNotNull(response.getShardFailures()); + assertThat(response.getShardFailures().length, equalTo(1)); + assertThat(response.getShardFailures()[0].toString(), containsString("are after 2262")); + } + ); } } @@ -1991,33 +2067,39 @@ public void testLongSortOptimizationCorrectResults() { bulkBuilder = client().prepareBulk(); } String source = "{\"long_field\":" + randomLong() + "}"; - bulkBuilder.add(client().prepareIndex("test1").setId(Integer.toString(i)).setSource(source, XContentType.JSON)); + 
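Why the expected failure messages in testCastDate mention 1970 and 2262: a date_nanos sort key is nanoseconds-since-epoch stored in a signed long, so only instants in [1970-01-01T00:00:00Z, 2262-04-11T23:47:16.854775807Z] are representable. A self-contained check of the upper bound (plain JDK, no Elasticsearch types involved):

    import java.time.Instant;

    class DateNanosUpperBound {
        public static void main(String[] args) {
            long maxNanos = Long.MAX_VALUE; // 9223372036854775807 ns since the epoch
            Instant max = Instant.ofEpochSecond(maxNanos / 1_000_000_000L, maxNanos % 1_000_000_000L);
            System.out.println(max); // prints 2262-04-11T23:47:16.854775807Z
        }
    }

This is also why the test documents above use timestamps like "2024-04-11T23:47:16.854775807Z": the fractional part is the largest representable nanosecond value, which makes the expected sort keys (1712879236854775807L and friends) easy to pin down exactly.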
bulkBuilder.add(prepareIndex("test1").setId(Integer.toString(i)).setSource(source, XContentType.JSON)); } refresh(); // *** 1. sort DESC on long_field - SearchResponse searchResponse = prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.DESC)).setSize(10).get(); - assertNoFailures(searchResponse); - long previousLong = Long.MAX_VALUE; - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - // check the correct sort order - SearchHit hit = searchResponse.getHits().getHits()[i]; - long currentLong = (long) hit.getSortValues()[0]; - assertThat("sort order is incorrect", currentLong, lessThanOrEqualTo(previousLong)); - previousLong = currentLong; - } + assertNoFailuresAndResponse( + prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.DESC)).setSize(10), + response -> { + long previousLong = Long.MAX_VALUE; + for (int i = 0; i < response.getHits().getHits().length; i++) { + // check the correct sort order + SearchHit hit = response.getHits().getHits()[i]; + long currentLong = (long) hit.getSortValues()[0]; + assertThat("sort order is incorrect", currentLong, lessThanOrEqualTo(previousLong)); + previousLong = currentLong; + } + } + ); // *** 2. sort ASC on long_field - searchResponse = prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.ASC)).setSize(10).get(); - assertNoFailures(searchResponse); - previousLong = Long.MIN_VALUE; - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - // check the correct sort order - SearchHit hit = searchResponse.getHits().getHits()[i]; - long currentLong = (long) hit.getSortValues()[0]; - assertThat("sort order is incorrect", currentLong, greaterThanOrEqualTo(previousLong)); - previousLong = currentLong; - } + assertNoFailuresAndResponse( + prepareSearch().addSort(new FieldSortBuilder("long_field").order(SortOrder.ASC)).setSize(10), + response -> { + long previousLong = Long.MIN_VALUE; + for (int i = 0; i < response.getHits().getHits().length; i++) { + // check the correct sort order + SearchHit hit = response.getHits().getHits()[i]; + long currentLong = (long) hit.getSortValues()[0]; + assertThat("sort order is incorrect", currentLong, greaterThanOrEqualTo(previousLong)); + previousLong = currentLong; + } + } + ); } public void testSortMixedFieldTypes() { @@ -2026,17 +2108,14 @@ public void testSortMixedFieldTypes() { assertAcked(prepareCreate("index_double").setMapping("foo", "type=double").get()); assertAcked(prepareCreate("index_keyword").setMapping("foo", "type=keyword").get()); - client().prepareIndex("index_long").setId("1").setSource("foo", "123").get(); - client().prepareIndex("index_integer").setId("1").setSource("foo", "123").get(); - client().prepareIndex("index_double").setId("1").setSource("foo", "123").get(); - client().prepareIndex("index_keyword").setId("1").setSource("foo", "123").get(); + prepareIndex("index_long").setId("1").setSource("foo", "123").get(); + prepareIndex("index_integer").setId("1").setSource("foo", "123").get(); + prepareIndex("index_double").setId("1").setSource("foo", "123").get(); + prepareIndex("index_keyword").setId("1").setSource("foo", "123").get(); refresh(); { // mixing long and integer types is ok, as we convert integer sort to long sort - SearchResponse searchResponse = prepareSearch("index_long", "index_integer").addSort(new FieldSortBuilder("foo")) - .setSize(10) - .get(); - assertNoFailures(searchResponse); + assertNoFailures(prepareSearch("index_long", "index_integer").addSort(new 
FieldSortBuilder("foo")).setSize(10)); } String errMsg = "Can't sort on field [foo]; the field has incompatible sort types"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java index 777db15b596ec..2d0fbb42a42e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.sort; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.settings.Settings; @@ -29,6 +28,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -56,8 +56,7 @@ public void testDistanceSortingMVFields() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder)); ensureGreen(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") .setSource( jsonBuilder().startObject() .field("names", "New York") @@ -69,8 +68,7 @@ public void testDistanceSortingMVFields() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("2") + prepareIndex("test").setId("2") .setSource( jsonBuilder().startObject() .field("names", "New York 2") @@ -82,8 +80,7 @@ public void testDistanceSortingMVFields() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("3") + prepareIndex("test").setId("3") .setSource( jsonBuilder().startObject() .array("names", "Times Square", "Tribeca") @@ -103,8 +100,7 @@ public void testDistanceSortingMVFields() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("4") + prepareIndex("test").setId("4") .setSource( jsonBuilder().startObject() .array("names", "Wall Street", "Soho") @@ -124,8 +120,7 @@ public void testDistanceSortingMVFields() throws Exception { ) .get(); - client().prepareIndex("test") - .setId("5") + prepareIndex("test").setId("5") .setSource( jsonBuilder().startObject() .array("names", "Greenwich Village", "Brooklyn") @@ -148,81 +143,87 @@ public void testDistanceSortingMVFields() throws Exception { indicesAdmin().prepareRefresh().get(); // Order: Asc - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 5); - assertOrderedSearchHits(searchResponse, "1", "2", "3", "4", "5"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java
index 777db15b596ec..2d0fbb42a42e2 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.search.sort;
 
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.settings.Settings;
@@ -29,6 +28,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
@@ -56,8 +56,7 @@ public void testDistanceSortingMVFields() throws Exception {
         assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder));
         ensureGreen();
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .field("names", "New York")
@@ -69,8 +68,7 @@ public void testDistanceSortingMVFields() throws Exception {
             )
             .get();
 
-        client().prepareIndex("test")
-            .setId("2")
+        prepareIndex("test").setId("2")
             .setSource(
                 jsonBuilder().startObject()
                     .field("names", "New York 2")
@@ -82,8 +80,7 @@ public void testDistanceSortingMVFields() throws Exception {
             )
             .get();
 
-        client().prepareIndex("test")
-            .setId("3")
+        prepareIndex("test").setId("3")
             .setSource(
                 jsonBuilder().startObject()
                     .array("names", "Times Square", "Tribeca")
@@ -103,8 +100,7 @@ public void testDistanceSortingMVFields() throws Exception {
             )
             .get();
 
-        client().prepareIndex("test")
-            .setId("4")
+        prepareIndex("test").setId("4")
             .setSource(
                 jsonBuilder().startObject()
                     .array("names", "Wall Street", "Soho")
@@ -124,8 +120,7 @@ public void testDistanceSortingMVFields() throws Exception {
             )
             .get();
 
-        client().prepareIndex("test")
-            .setId("5")
+        prepareIndex("test").setId("5")
             .setSource(
                 jsonBuilder().startObject()
                     .array("names", "Greenwich Village", "Brooklyn")
@@ -148,81 +143,87 @@ public void testDistanceSortingMVFields() throws Exception {
         indicesAdmin().prepareRefresh().get();
 
         // Order: Asc
-        SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery())
-            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC))
-            .get();
-
-        assertHitCount(searchResponse, 5);
-        assertOrderedSearchHits(searchResponse, "1", "2", "3", "4", "5");
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
-
+        assertResponse(
+            prepareSearch("test").setQuery(matchAllQuery())
+                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)),
+            response -> {
+                assertHitCount(response, 5);
+                assertOrderedSearchHits(response, "1", "2", "3", "4", "5");
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+                assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+            }
+        );
         // Order: Asc, Mode: max
-        searchResponse = prepareSearch("test").setQuery(matchAllQuery())
-            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode(SortMode.MAX))
-            .get();
-
-        assertHitCount(searchResponse, 5);
-        assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5");
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
-
+        assertResponse(
+            prepareSearch("test").setQuery(matchAllQuery())
+                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode(SortMode.MAX)),
+            response -> {
+                assertHitCount(response, 5);
+                assertOrderedSearchHits(response, "1", "2", "4", "3", "5");
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+            }
+        );
         // Order: Desc
-        searchResponse = prepareSearch("test").setQuery(matchAllQuery())
-            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC))
-            .get();
-
-        assertHitCount(searchResponse, 5);
-        assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1");
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
-
+        assertResponse(
+            prepareSearch("test").setQuery(matchAllQuery())
+                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)),
+            response -> {
+                assertHitCount(response, 5);
+                assertOrderedSearchHits(response, "5", "3", "4", "2", "1");
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+                assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
+            }
+        );
         // Order: Desc, Mode: min
-        searchResponse = prepareSearch("test").setQuery(matchAllQuery())
-            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode(SortMode.MIN))
-            .get();
-
-        assertHitCount(searchResponse, 5);
-        assertOrderedSearchHits(searchResponse, "5", "4", "3", "2", "1");
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
-
-        searchResponse = prepareSearch("test").setQuery(matchAllQuery())
-            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.ASC))
-            .get();
-
-        assertHitCount(searchResponse, 5);
-        assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5");
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2874d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(5301d, 10d));
-
-        searchResponse = prepareSearch("test").setQuery(matchAllQuery())
-            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.DESC))
-            .get();
-
-        assertHitCount(searchResponse, 5);
-        assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1");
-        assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
-
+        assertResponse(
+            prepareSearch("test").setQuery(matchAllQuery())
+                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode(SortMode.MIN)),
+            response -> {
+                assertHitCount(response, 5);
+                assertOrderedSearchHits(response, "5", "4", "3", "2", "1");
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+                assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+                assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(matchAllQuery())
+                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.ASC)),
+            response -> {
+                assertHitCount(response, 5);
+                assertOrderedSearchHits(response, "1", "2", "4", "3", "5");
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157d, 10d));
+                assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2874d, 10d));
+                assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(5301d, 10d));
+            }
+        );
+        assertResponse(
+            prepareSearch("test").setQuery(matchAllQuery())
+                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.DESC)),
+            response -> {
+                assertHitCount(response, 5);
+                assertOrderedSearchHits(response, "5", "3", "4", "2", "1");
+                assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+                assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+                assertThat(((Number) response.getHits().getAt(4).getSortValues()[0]).doubleValue(), closeTo(0d, 10d));
+            }
+        );
         try {
             prepareSearch("test").setQuery(matchAllQuery())
                 .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.SUM));
@@ -247,8 +248,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception {
         assertAcked(prepareCreate("test").setSettings(settings).setMapping(xContentBuilder));
         ensureGreen();
 
-        client().prepareIndex("test")
-            .setId("1")
+        prepareIndex("test").setId("1")
             .setSource(
                 jsonBuilder().startObject()
                     .array("names", "Times Square", "Tribeca")
@@ -268,33 +268,33 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception {
             )
             .get();
 
-        client().prepareIndex("test")
-            .setId("2")
-            .setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject())
-            .get();
+        prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject()).get();
         refresh();
 
        //
Order: Asc - SearchResponse searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "1", "2"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 2); + assertOrderedSearchHits(response, "1", "2"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); // Order: Desc - searchResponse = prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)) - .get(); - - // Doc with missing geo point is first, is consistent with 0.20.x - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + assertResponse( + prepareSearch("test").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)), + response -> { + // Doc with missing geo point is first, which is consistent with 0.20.x + assertHitCount(response, 2); + assertOrderedSearchHits(response, "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + } + ); } public void testDistanceSortingNestedFields() throws Exception { @@ -322,8 +322,7 @@ public void testDistanceSortingNestedFields() throws Exception { indexRandom( true, - client().prepareIndex("companies") - .setId("1") + prepareIndex("companies").setId("1") .setSource( jsonBuilder().startObject() .field("name", "company 1") @@ -338,8 +337,7 @@ public void testDistanceSortingNestedFields() throws Exception { .endArray() .endObject() ), - client().prepareIndex("companies") - .setId("2") + prepareIndex("companies").setId("2") .setSource( jsonBuilder().startObject() .field("name", "company 2") @@ -363,8 +361,7 @@ public void testDistanceSortingNestedFields() throws Exception { .endArray() .endObject() ), - client().prepareIndex("companies") - .setId("3") + prepareIndex("companies").setId("3") .setSource( jsonBuilder().startObject() .field("name", "company 3") @@ -387,8 +384,7 @@ public void testDistanceSortingNestedFields() throws Exception { .endArray() .endObject() ), - client().prepareIndex("companies") - .setId("4") + prepareIndex("companies").setId("4") .setSource( jsonBuilder().startObject() .field("name", "company 4") @@ -416,119 +412,127 @@ public void testDistanceSortingNestedFields() throws Exception { ); // Order: Asc - SearchResponse searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - 
.addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.ASC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "2", "3", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.ASC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "2", "3", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + } + ); // Order: Asc, Mode: max - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.ASC) - .sortMode(SortMode.MAX) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "3", "2", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.ASC) + .sortMode(SortMode.MAX) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "3", "2", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + } + ); // Order: Desc - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.DESC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - 
assertOrderedSearchHits(searchResponse, "4", "2", "3", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.DESC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "2", "3", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1258.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); // Order: Desc, Mode: min - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .order(SortOrder.DESC) - .sortMode(SortMode.MIN) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "4", "3", "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - .setNestedSort(new NestedSortBuilder("branches")) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "1", "3", "2", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .setNestedSort(new NestedSortBuilder("branches")) - .sortMode(SortMode.AVG) - .order(SortOrder.DESC) - ) - .get(); - - assertHitCount(searchResponse, 4); - assertOrderedSearchHits(searchResponse, "4", "2", "3", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); - assertThat(((Number) 
searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); - - searchResponse = prepareSearch("companies").setQuery(matchAllQuery()) - .addSort( - SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .setNestedSort(new NestedSortBuilder("branches").setFilter(termQuery("branches.name", "brooklyn"))) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ) - .get(); - assertHitCount(searchResponse, 4); - assertFirstHit(searchResponse, hasId("4")); - assertSearchHits(searchResponse, "1", "2", "3", "4"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(3).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .order(SortOrder.DESC) + .sortMode(SortMode.MIN) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "3", "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(2029.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1055.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + .setNestedSort(new NestedSortBuilder("branches")) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "1", "3", "2", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .setNestedSort(new NestedSortBuilder("branches")) + .sortMode(SortMode.AVG) + .order(SortOrder.DESC) + ), + response -> { + assertHitCount(response, 4); + assertOrderedSearchHits(response, "4", "2", "3", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(5301.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(2874.0d, 10d)); + assertThat(((Number) 
response.getHits().getAt(2).getSortValues()[0]).doubleValue(), closeTo(1157.0d, 10d)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), closeTo(0d, 10d)); + } + ); + assertResponse( + prepareSearch("companies").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) + .setNestedSort(new NestedSortBuilder("branches").setFilter(termQuery("branches.name", "brooklyn"))) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ), + response -> { + assertHitCount(response, 4); + assertFirstHit(response, hasId("4")); + assertSearchHits(response, "1", "2", "3", "4"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(8572.0d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(3).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); try { prepareSearch("companies").setQuery(matchAllQuery()) .addSort( @@ -562,7 +566,7 @@ public void testGeoDistanceFilter() throws IOException { XContentBuilder source = JsonXContent.contentBuilder().startObject().field("pin", Geohash.stringEncode(lon, lat)).endObject(); assertAcked(prepareCreate("locations").setSettings(settings).setMapping(mapping)); - client().prepareIndex("locations").setId("1").setCreate(true).setSource(source).get(); + prepareIndex("locations").setId("1").setCreate(true).setSource(source).get(); refresh(); client().prepareGet("locations", "1").get(); @@ -585,8 +589,7 @@ public void testDistanceSortingWithUnmappedField() throws Exception { assertAcked(prepareCreate("test2")); ensureGreen(); - client().prepareIndex("test1") - .setId("1") + prepareIndex("test1").setId("1") .setSource( jsonBuilder().startObject() .array("names", "Times Square", "Tribeca") @@ -606,40 +609,41 @@ public void testDistanceSortingWithUnmappedField() throws Exception { ) .get(); - client().prepareIndex("test2") - .setId("2") - .setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject()) - .get(); + prepareIndex("test2").setId("2").setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject()).get(); refresh(); // Order: Asc - SearchResponse searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.ASC)) - .get(); - - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "1", "2"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.ASC)), + response -> { + assertHitCount(response, 2); + assertOrderedSearchHits(response, "1", "2"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + } + ); // Order: Desc - 
searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.DESC)) - .get(); - - // Doc with missing geo point is first, is consistent with 0.20.x - assertHitCount(searchResponse, 2); - assertOrderedSearchHits(searchResponse, "2", "1"); - assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); - + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.DESC)), + response -> { + // Doc with missing geo point is first, which is consistent with 0.20.x + assertHitCount(response, 2); + assertOrderedSearchHits(response, "2", "1"); + assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + } + ); // Make sure that by default the unmapped fields continue to fail - searchResponse = prepareSearch("test1", "test2").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)) - .get(); - assertThat(searchResponse.getFailedShards(), greaterThan(0)); - assertHitCount(searchResponse, 1); + assertResponse( + prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)), + response -> { + assertThat(response.getFailedShards(), greaterThan(0)); + assertHitCount(response, 1); + } + ); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 54d730cec2bc3..67426caf2faab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -32,6 +32,7 @@ import static org.elasticsearch.search.sort.SortBuilders.fieldSort; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.closeTo; @@ -70,11 +71,7 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce logger.info("d1: {}", d1Builder); logger.info("d2: {}", d2Builder); - indexRandom( - true, - client().prepareIndex("index").setId("d1").setSource(d1Builder), - client().prepareIndex("index").setId("d2").setSource(d2Builder) - ); + indexRandom(true, prepareIndex("index").setId("d1").setSource(d1Builder), prepareIndex("index").setId("d2").setSource(d2Builder)); GeoPoint[] q = new GeoPoint[2]; if (randomBoolean()) { q[0] = new GeoPoint(2, 1); q[1] = new GeoPoint(2, 2); } else { q[1] = new GeoPoint(2, 2); q[0] = new GeoPoint(2, 1); } - 
SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.DESC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MIN).order(SortOrder.DESC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 5, 1, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 3, 2, DistanceUnit.METERS), 10d) + ); + } ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.DESC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) 
response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MAX).order(SortOrder.DESC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 1, 6, 2, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2, 2, 4, 1, DistanceUnit.METERS), 10d) + ); + } ); } @@ -157,37 +163,38 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc logger.info("d1: {}", d1Builder); logger.info("d2: {}", d2Builder); - indexRandom( - true, - client().prepareIndex("index").setId("d1").setSource(d1Builder), - client().prepareIndex("index").setId("d2").setSource(d2Builder) - ); + indexRandom(true, prepareIndex("index").setId("d1").setSource(d1Builder), prepareIndex("index").setId("d2").setSource(d2Builder)); GeoPoint q = new GeoPoint(0, 0); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.AVG).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d2", "d1"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MEDIAN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.AVG).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d2", "d1"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + assertResponse( + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, q).sortMode(SortMode.MEDIAN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 4, DistanceUnit.METERS), 10d) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d) + ); + } ); } @@ -227,11 +234,7 @@ public 
void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept GeoPoint[] d2Points = { new GeoPoint(4.5, 1), new GeoPoint(4.75, 2), new GeoPoint(5, 3), new GeoPoint(5.25, 4) }; createShuffeldJSONArray(d2Builder, d2Points); - indexRandom( - true, - client().prepareIndex("index").setId("d1").setSource(d1Builder), - client().prepareIndex("index").setId("d2").setSource(d2Builder) - ); + indexRandom(true, prepareIndex("index").setId("d1").setSource(d1Builder), prepareIndex("index").setId("d2").setSource(d2Builder)); List<GeoPoint> qPoints = Arrays.asList(new GeoPoint(2, 1), new GeoPoint(2, 2), new GeoPoint(2, 3), new GeoPoint(2, 4)); Collections.shuffle(qPoints, random()); @@ -245,30 +248,33 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept } } - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) - ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(4.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) - ); - - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC)) - .get(); - assertOrderedSearchHits(searchResponse, "d1", "d2"); - assertThat( - (Double) searchResponse.getHits().getAt(0).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(3.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(4.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + } ); - assertThat( - (Double) searchResponse.getHits().getAt(1).getSortValues()[0], - closeTo(GeoDistance.ARC.calculate(5.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC)), + response -> { + assertOrderedSearchHits(response, "d1", "d2"); + assertThat( + (Double) response.getHits().getAt(0).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(3.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + assertThat( + (Double) response.getHits().getAt(1).getSortValues()[0], + closeTo(GeoDistance.ARC.calculate(5.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1) + ); + } ); } @@ -277,11 +283,9 @@ public void testSinglePointGeoDistanceSort() throws ExecutionException, Interrup assertAcked(prepareCreate("index").setMapping(LOCATION_FIELD, "type=geo_point")); indexRandom( true, - client().prepareIndex("index") - .setId("d1") + prepareIndex("index").setId("d1") .setSource(jsonBuilder().startObject().startObject(LOCATION_FIELD).field("lat", 1).field("lon", 1).endObject().endObject()), - client().prepareIndex("index") - .setId("d2") + prepareIndex("index").setId("d2") .setSource(jsonBuilder().startObject().startObject(LOCATION_FIELD).field("lat", 1).field("lon", 2).endObject().endObject()) ); @@ -289,42 +293,48 @@ 
public void testSinglePointGeoDistanceSort() throws ExecutionException, Interrup GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, hashPoint); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, new GeoPoint(2, 2)); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, 2, 2); - searchResponse = prepareSearch().setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource( - new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, "s037ms06g7h0")) - ).get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, "s037ms06g7h0"))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))) - .get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))), + response -> checkCorrectSortOrderForGeoSort(response) + ); - searchResponse = prepareSearch().setSource( - new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0).validation(GeoValidationMethod.COERCE)) - ).get(); - checkCorrectSortOrderForGeoSort(searchResponse); + assertResponse( + prepareSearch().setSource( + new SearchSourceBuilder().sort( + SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0).validation(GeoValidationMethod.COERCE) + ) + ), + response -> checkCorrectSortOrderForGeoSort(response) + ); } private static void checkCorrectSortOrderForGeoSort(SearchResponse searchResponse) { @@ -347,8 +357,8 @@ public void testCrossIndexIgnoreUnmapped() throws Exception { indexRandom( true, - client().prepareIndex("test1").setSource("str_field", "bcd", "long_field", 3, "double_field", 0.65), - 
client().prepareIndex("test2").setSource() + prepareIndex("test1").setSource("str_field", "bcd", "long_field", 3, "double_field", 0.65), + prepareIndex("test2").setSource() ); assertSortValues( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java index db06eb1b5de0b..c7b934df0394f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/SimpleSortIT.java @@ -147,8 +147,7 @@ public void testSimpleSorts() throws Exception { List<IndexRequestBuilder> builders = new ArrayList<>(); for (int i = 0; i < 10; i++) { builders.add( - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource( jsonBuilder().startObject() .field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })) @@ -240,8 +239,7 @@ public void testSortMinValueScript() throws IOException { ensureGreen(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId("" + i) + prepareIndex("test").setId("" + i) .setSource( jsonBuilder().startObject() .field("ord", i) @@ -258,7 +256,7 @@ } for (int i = 10; i < 20; i++) { // add some docs that don't have values in those fields - client().prepareIndex("test").setId("" + i).setSource(jsonBuilder().startObject().field("ord", i).endObject()).get(); + prepareIndex("test").setId("" + i).setSource(jsonBuilder().startObject().field("ord", i).endObject()).get(); } indicesAdmin().prepareRefresh("test").get(); @@ -349,11 +347,11 @@ public void testDocumentsWithNullValue() throws Exception { assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "1").field("svalue", "aaa").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "1").field("svalue", "aaa").endObject()).get(); - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "2").nullField("svalue").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "2").nullField("svalue").endObject()).get(); - client().prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "3").field("svalue", "bbb").endObject()).get(); + prepareIndex("test").setSource(jsonBuilder().startObject().field("id", "3").field("svalue", "bbb").endObject()).get(); flush(); refresh(); @@ -432,10 +430,7 @@ public void test2920() throws IOException { ); ensureGreen(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("value", "" + i).endObject()) - .get(); + prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("value", "" + i).endObject()).get(); } refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java index 1860082c833ad..2967bdc454aed 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.join.ScoreMode; import 
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.NestedQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; @@ -22,6 +21,7 @@ import java.util.Collections; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -31,59 +31,66 @@ public void testSimple() { assertAcked(prepareCreate("test")); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", "value").get(); + prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); - SearchResponse response = prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true).get(); - assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); - assertThat(response.getHits().getAt(0).getVersion(), notNullValue()); - - response = prepareSearch("test").storedFields("_none_").get(); - assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + assertResponse(prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true), response -> { + assertThat(response.getHits().getAt(0).getId(), nullValue()); + assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + assertThat(response.getHits().getAt(0).getVersion(), notNullValue()); + }); + + assertResponse(prepareSearch("test").storedFields("_none_"), response -> { + assertThat(response.getHits().getAt(0).getId(), nullValue()); + assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + }); } public void testInnerHits() { assertAcked(prepareCreate("test").setMapping("nested", "type=nested")); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get(); + prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get(); refresh(); - SearchResponse response = prepareSearch("test").storedFields("_none_") - .setFetchSource(false) - .setQuery( - new NestedQueryBuilder("nested", new TermQueryBuilder("nested.title", "foo"), ScoreMode.Total).innerHit( - new InnerHitBuilder().setStoredFieldNames(Collections.singletonList("_none_")) - .setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) - ) - ) - .get(); - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); - assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); - SearchHits hits = response.getHits().getAt(0).getInnerHits().get("nested"); - assertThat(hits.getTotalHits().value, equalTo(1L)); - assertThat(hits.getAt(0).getId(), nullValue()); - assertThat(hits.getAt(0).getSourceAsString(), nullValue()); + assertResponse( + prepareSearch("test").storedFields("_none_") + .setFetchSource(false) + .setQuery( + new NestedQueryBuilder("nested", new 
TermQueryBuilder("nested.title", "foo"), ScoreMode.Total).innerHit( + new InnerHitBuilder().setStoredFieldNames(Collections.singletonList("_none_")) + .setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE) + ) + ), + response -> { + assertThat(response.getHits().getTotalHits().value, equalTo(1L)); + assertThat(response.getHits().getAt(0).getId(), nullValue()); + assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + SearchHits hits = response.getHits().getAt(0).getInnerHits().get("nested"); + assertThat(hits.getTotalHits().value, equalTo(1L)); + assertThat(hits.getAt(0).getId(), nullValue()); + assertThat(hits.getAt(0).getSourceAsString(), nullValue()); + } + ); } public void testWithRouting() { assertAcked(prepareCreate("test")); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get(); + prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get(); refresh(); - SearchResponse response = prepareSearch("test").storedFields("_none_").setFetchSource(false).get(); - assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).field("_routing"), nullValue()); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + assertResponse(prepareSearch("test").storedFields("_none_").setFetchSource(false), response -> { + assertThat(response.getHits().getAt(0).getId(), nullValue()); + assertThat(response.getHits().getAt(0).field("_routing"), nullValue()); + assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + }); - response = prepareSearch("test").storedFields("_none_").get(); - assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + assertResponse(prepareSearch("test").storedFields("_none_"), response -> { + assertThat(response.getHits().getAt(0).getId(), nullValue()); + assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + }); } public void testInvalid() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java index 3fcbc5cf4add6..81facfa8116c0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java @@ -8,9 +8,9 @@ package org.elasticsearch.search.source; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.test.ESIntegTestCase; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsEqual.equalTo; @@ -23,14 +23,17 @@ public void testSourceDefaultBehavior() { indexDoc("test", "1", "field", "value"); refresh(); - SearchResponse response = prepareSearch("test").get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertResponse(prepareSearch("test"), response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue())); - response = prepareSearch("test").addStoredField("bla").get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); + assertResponse( + prepareSearch("test").addStoredField("bla"), + response -> 
assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()) + ); - response = prepareSearch("test").addStoredField("_source").get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertResponse( + prepareSearch("test").addStoredField("_source"), + response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()) + ); } @@ -38,29 +41,33 @@ public void testSourceFiltering() { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field1", "value", "field2", "value2").get(); + prepareIndex("test").setId("1").setSource("field1", "value", "field2", "value2").get(); refresh(); - SearchResponse response = prepareSearch("test").setFetchSource(false).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); - - response = prepareSearch("test").setFetchSource(true).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - - response = prepareSearch("test").setFetchSource("field1", null).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); - - response = prepareSearch("test").setFetchSource("hello", null).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)); - - response = prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" }).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); - + assertResponse( + prepareSearch("test").setFetchSource(false), + response -> assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()) + ); + + assertResponse( + prepareSearch("test").setFetchSource(true), + response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()) + ); + + assertResponse(prepareSearch("test").setFetchSource("field1", null), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); + }); + assertResponse(prepareSearch("test").setFetchSource("hello", null), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)); + }); + assertResponse(prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" }), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); + }); } /** @@ -71,17 +78,18 @@ public void testSourceWithWildcardFiltering() { createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("field", "value").get(); + prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); - SearchResponse response = prepareSearch("test").setFetchSource(new 
String[] { "*.notexisting", "field" }, null).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); - - response = prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null).get(); - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); + assertResponse(prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); + }); + assertResponse(prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null), response -> { + assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java index 32f5e14b944a2..0d850a3708044 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.indices.stats.FieldUsageShardResponse; import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsAction; import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -30,6 +29,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; public class FieldUsageStatsIT extends ESIntegTestCase { @@ -55,8 +55,7 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio LocalDate date = LocalDate.of(2015, 9, 1); for (int i = 0; i < 30; i++) { - client().prepareIndex("test") - .setId(Integer.toString(i)) + prepareIndex("test").setId(Integer.toString(i)) .setSource("field", "value", "field2", "value2", "date_field", formatter.format(date.plusDays(i))) .get(); } @@ -73,16 +72,18 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio assertFalse(stats.hasField("field2")); assertFalse(stats.hasField("date_field")); - SearchResponse searchResponse = prepareSearch().setSearchType(SearchType.DEFAULT) - .setQuery(QueryBuilders.termQuery("field", "value")) - .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword")) 
- .addAggregation(AggregationBuilders.filter("agg2", QueryBuilders.spanTermQuery("field2", "value2"))) - .setSize(between(5, 100)) - .setPreference("fixed") - .get(); - - assertHitCount(searchResponse, 30); - assertAllSuccessful(searchResponse); + assertResponse( + prepareSearch().setSearchType(SearchType.DEFAULT) + .setQuery(QueryBuilders.termQuery("field", "value")) + .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword")) + .addAggregation(AggregationBuilders.filter("agg2", QueryBuilders.spanTermQuery("field2", "value2"))) + .setSize(between(5, 100)) + .setPreference("fixed"), + response -> { + assertHitCount(response, 30); + assertAllSuccessful(response); + } + ); stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test")); logger.info("Stats after first query: {}", stats); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java index 07e8c516eda41..23384d1b199f9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; @@ -39,7 +38,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -83,7 +83,7 @@ public void testSimpleStats() throws Exception { assertAcked(prepareCreate("test1").setSettings(indexSettings(shardsIdx1, 0))); int docsTest1 = scaledRandomIntBetween(3 * shardsIdx1, 5 * shardsIdx1); for (int i = 0; i < docsTest1; i++) { - client().prepareIndex("test1").setId(Integer.toString(i)).setSource("field", "value").get(); + prepareIndex("test1").setId(Integer.toString(i)).setSource("field", "value").get(); if (rarely()) { refresh(); } @@ -91,7 +91,7 @@ public void testSimpleStats() throws Exception { assertAcked(prepareCreate("test2").setSettings(indexSettings(shardsIdx2, 0))); int docsTest2 = scaledRandomIntBetween(3 * shardsIdx2, 5 * shardsIdx2); for (int i = 0; i < docsTest2; i++) { - client().prepareIndex("test2").setId(Integer.toString(i)).setSource("field", "value").get(); + prepareIndex("test2").setId(Integer.toString(i)).setSource("field", "value").get(); if (rarely()) { refresh(); } @@ -103,16 +103,22 @@ public void testSimpleStats() throws Exception { refresh(); int iters = scaledRandomIntBetween(100, 150); for (int i = 0; i < iters; i++) { - 
SearchResponse searchResponse = internalCluster().coordOnlyNodeClient() - .prepareSearch() - .setQuery(QueryBuilders.termQuery("field", "value")) - .setStats("group1", "group2") - .highlighter(new HighlightBuilder().field("field")) - .addScriptField("script1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.field", Collections.emptyMap())) - .setSize(100) - .get(); - assertHitCount(searchResponse, docsTest1 + docsTest2); - assertAllSuccessful(searchResponse); + assertResponse( + internalCluster().coordOnlyNodeClient() + .prepareSearch() + .setQuery(QueryBuilders.termQuery("field", "value")) + .setStats("group1", "group2") + .highlighter(new HighlightBuilder().field("field")) + .addScriptField( + "script1", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_source.field", Collections.emptyMap()) + ) + .setSize(100), + response -> { + assertHitCount(response, docsTest1 + docsTest2); + assertAllSuccessful(response); + } + ); } IndicesStatsResponse indicesStats = indicesAdmin().prepareStats().get(); @@ -175,11 +181,7 @@ public void testOpenContexts() { final int docs = scaledRandomIntBetween(20, 50); for (int s = 0; s < numAssignedShards(index); s++) { for (int i = 0; i < docs; i++) { - client().prepareIndex(index) - .setId(Integer.toString(s * docs + i)) - .setSource("field", "value") - .setRouting(Integer.toString(s)) - .get(); + prepareIndex(index).setId(Integer.toString(s * docs + i)).setSource("field", "value").setRouting(Integer.toString(s)).get(); } } indicesAdmin().prepareRefresh(index).get(); @@ -188,11 +190,15 @@ public void testOpenContexts() { assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0L)); int size = scaledRandomIntBetween(1, docs); - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setSize(size) - .setScroll(TimeValue.timeValueMinutes(2)) - .get(); - assertNoFailures(searchResponse); + final String[] scroll = new String[1]; + final int[] total = new int[1]; + assertNoFailuresAndResponse( + prepareSearch().setQuery(matchAllQuery()).setSize(size).setScroll(TimeValue.timeValueMinutes(2)), + response -> { + scroll[0] = response.getScrollId(); + total[0] = response.getHits().getHits().length; + } + ); // refresh the stats now that scroll contexts are opened indicesStats = indicesAdmin().prepareStats(index).get(); @@ -202,11 +208,14 @@ public void testOpenContexts() { int hits = 0; while (true) { - if (searchResponse.getHits().getHits().length == 0) { + if (total[0] == 0) { break; } - hits += searchResponse.getHits().getHits().length; - searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).get(); + hits += total[0]; + assertResponse(client().prepareSearchScroll(scroll[0]).setScroll(TimeValue.timeValueMinutes(2)), response -> { + scroll[0] = response.getScrollId(); + total[0] = response.getHits().getHits().length; + }); } long expected = 0; @@ -220,7 +229,7 @@ public void testOpenContexts() { assertEquals(hits, docs * numAssignedShards(index)); assertThat(stats.getQueryCount(), greaterThanOrEqualTo(expected)); - clearScroll(searchResponse.getScrollId()); + clearScroll(scroll[0]); indicesStats = indicesAdmin().prepareStats().get(); stats = indicesStats.getTotal().getSearch().getTotal(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 9592d3904a90d..b5f7468d1645c 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -58,6 +58,7 @@ import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -94,8 +95,7 @@ public void testTieBreak() throws Exception { String value = "a" + randomAlphaOfLengthBetween(1, 10); entries[i] = value; indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject().startObject(FIELD).field("input", value).field("weight", 10).endObject().endObject() ) @@ -117,8 +117,7 @@ public void testPrefix() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -145,8 +144,7 @@ public void testTextAndGlobalText() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -159,18 +157,22 @@ } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder noText = SuggestBuilders.completionSuggestion(FIELD); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("foo", noText).setGlobalText("sugg") - ).get(); - assertSuggestions(searchResponse, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6"); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", noText).setGlobalText("sugg")), + response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") + ); CompletionSuggestionBuilder withText = SuggestBuilders.completionSuggestion(FIELD).text("sugg"); - searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText)).get(); - assertSuggestions(searchResponse, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6"); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText)), + response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") + ); // test that suggestion text takes precedence over global text - searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText).setGlobalText("bogus")).get(); - assertSuggestions(searchResponse, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6"); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText).setGlobalText("bogus")), + response ->
assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") + ); } public void testRegex() throws Exception { @@ -180,8 +182,7 @@ public void testRegex() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -204,8 +205,7 @@ public void testFuzzy() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -228,8 +228,7 @@ public void testEarlyTermination() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -263,8 +262,7 @@ public void testSuggestDocument() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -278,18 +276,19 @@ indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").size(numDocs); - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)).get(); - CompletionSuggestion completionSuggestion = searchResponse.getSuggest().getSuggestion("foo"); - CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); - assertThat(options.getOptions().size(), equalTo(numDocs)); - int id = numDocs; - for (CompletionSuggestion.Entry.Option option : options) { - assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertThat(option.getHit(), hasId("" + id)); - assertThat(option.getHit(), hasScore((id))); - assertNotNull(option.getHit().getSourceAsMap()); - id--; - } + assertResponse(prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)), response -> { + CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("foo"); + CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); + assertThat(options.getOptions().size(), equalTo(numDocs)); + int id = numDocs; + for (CompletionSuggestion.Entry.Option option : options) { + assertThat(option.getText().toString(), equalTo("suggestion" + id)); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); + assertNotNull(option.getHit().getSourceAsMap()); + id--; + } + }); } public void testSuggestDocumentNoSource() throws Exception { @@ -299,8 +298,7 @@ public void testSuggestDocumentNoSource() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -314,20 +312,19 @@ indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder
prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").size(numDocs); - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)) - .setFetchSource(false) - .get(); - CompletionSuggestion completionSuggestion = searchResponse.getSuggest().getSuggestion("foo"); - CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); - assertThat(options.getOptions().size(), equalTo(numDocs)); - int id = numDocs; - for (CompletionSuggestion.Entry.Option option : options) { - assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertThat(option.getHit(), hasId("" + id)); - assertThat(option.getHit(), hasScore((id))); - assertNull(option.getHit().getSourceAsMap()); - id--; - } + assertResponse(prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)).setFetchSource(false), response -> { + CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("foo"); + CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); + assertThat(options.getOptions().size(), equalTo(numDocs)); + int id = numDocs; + for (CompletionSuggestion.Entry.Option option : options) { + assertThat(option.getText().toString(), equalTo("suggestion" + id)); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); + assertNull(option.getHit().getSourceAsMap()); + id--; + } + }); } public void testSuggestDocumentSourceFiltering() throws Exception { @@ -337,8 +334,7 @@ public void testSuggestDocumentSourceFiltering() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -354,23 +350,25 @@ indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg").size(numDocs); - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)) - .setFetchSource("a", "b") - .get(); - CompletionSuggestion completionSuggestion = searchResponse.getSuggest().getSuggestion("foo"); - CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); - assertThat(options.getOptions().size(), equalTo(numDocs)); - int id = numDocs; - for (CompletionSuggestion.Entry.Option option : options) { - assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertThat(option.getHit(), hasId("" + id)); - assertThat(option.getHit(), hasScore((id))); - assertNotNull(option.getHit().getSourceAsMap()); - Set<String> sourceFields = option.getHit().getSourceAsMap().keySet(); - assertThat(sourceFields, contains("a")); - assertThat(sourceFields, not(contains("b"))); - id--; - } + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)).setFetchSource("a", "b"), + response -> { + CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("foo"); + CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); + assertThat(options.getOptions().size(), equalTo(numDocs)); + int id = numDocs; + for (CompletionSuggestion.Entry.Option option : options) { + assertThat(option.getText().toString(), equalTo("suggestion" + id)); + assertThat(option.getHit(), hasId("" + id)); +
assertThat(option.getHit(), hasScore((id))); + assertNotNull(option.getHit().getSourceAsMap()); + Set<String> sourceFields = option.getHit().getSourceAsMap().keySet(); + assertThat(sourceFields, contains("a")); + assertThat(sourceFields, not(contains("b"))); + id--; + } + } + ); } /** @@ -381,17 +379,19 @@ public void testSuggestEmptyIndex() throws IOException { createIndexAndMapping(mapping); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("v"); - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)) - .setFetchSource("a", "b") - .get(); - Suggest suggest = searchResponse.getSuggest(); - assertNotNull(suggest); - CompletionSuggestion completionSuggestion = suggest.getSuggestion("foo"); - CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); - assertEquals("v", options.getText().string()); - assertEquals(1, options.getLength()); - assertEquals(0, options.getOffset()); - assertEquals(0, options.options.size()); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", prefix)).setFetchSource("a", "b"), + response -> { + Suggest suggest = response.getSuggest(); + assertNotNull(suggest); + CompletionSuggestion completionSuggestion = suggest.getSuggestion("foo"); + CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); + assertEquals("v", options.getText().string()); + assertEquals(1, options.getLength()); + assertEquals(0, options.getOffset()); + assertEquals(0, options.options.size()); + } + ); } public void testThatWeightsAreWorking() throws Exception { @@ -400,8 +400,7 @@ public void testThatWeightsAreWorking() throws Exception { List<String> similarNames = Arrays.asList("the", "The Prodigy", "The Verve", "The the"); // the weight is 1000 divided by string length, so the results are easy to check for (String similarName : similarNames) { - client().prepareIndex(INDEX) - .setId(similarName) + prepareIndex(INDEX).setId(similarName) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -425,8 +424,7 @@ Exception e = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex(INDEX) - .setId("1") + () -> prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -445,8 +443,7 @@ public void testThatWeightCanBeAString() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -461,22 +458,25 @@ refresh(); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("testSuggestions", new CompletionSuggestionBuilder(FIELD).text("test").size(10)) - ).get(); - - assertSuggestions(searchResponse, "testSuggestions", "testing"); - Suggest.Suggestion.Entry.Option option = searchResponse.getSuggest() - .getSuggestion("testSuggestions") - .getEntries() - .get(0) - .getOptions() - .get(0); - assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class))); - CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option; - - assertThat(prefixOption.getText().string(), equalTo("testing")); - assertThat((long) prefixOption.getScore(), equalTo(10L)); + assertResponse( +
prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion("testSuggestions", new CompletionSuggestionBuilder(FIELD).text("test").size(10)) + ), + response -> { + assertSuggestions(response, "testSuggestions", "testing"); + Suggest.Suggestion.Entry.Option option = response.getSuggest() + .getSuggestion("testSuggestions") + .getEntries() + .get(0) + .getOptions() + .get(0); + assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class))); + CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option; + + assertThat(prefixOption.getText().string(), equalTo("testing")); + assertThat((long) prefixOption.getScore(), equalTo(10L)); + } + ); } public void testThatWeightMustNotBeANonNumberString() throws Exception { @@ -484,8 +484,7 @@ public void testThatWeightMustNotBeANonNumberString() throws Exception { Exception e = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex(INDEX) - .setId("1") + () -> prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -508,8 +507,7 @@ public void testThatWeightAsStringMustBeInt() throws Exception { Exception e = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex(INDEX) - .setId("1") + () -> prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -528,8 +526,7 @@ public void testThatWeightAsStringMustBeInt() throws Exception { public void testThatInputCanBeAStringInsteadOfAnArray() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource(jsonBuilder().startObject().startObject(FIELD).field("input", "Foo Fighters").endObject().endObject()) .get(); @@ -542,8 +539,7 @@ public void testDisabledPreserveSeparators() throws Exception { completionMappingBuilder.preserveSeparators(false); createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -556,8 +552,7 @@ public void testDisabledPreserveSeparators() throws Exception { ) .get(); - client().prepareIndex(INDEX) - .setId("2") + prepareIndex(INDEX).setId("2") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -579,15 +574,13 @@ public void testEnabledPreserveSeparators() throws Exception { completionMappingBuilder.preserveSeparators(true); createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Foo Fighters").endArray().endObject().endObject() ) .get(); - client().prepareIndex(INDEX) - .setId("2") + prepareIndex(INDEX).setId("2") .setSource(jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Foof").endArray().endObject().endObject()) .get(); @@ -599,8 +592,7 @@ public void testEnabledPreserveSeparators() throws Exception { public void testThatMultipleInputsAreSupported() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -622,8 +614,7 @@ public void testThatMultipleInputsAreSupported() throws Exception { public void testThatShortSyntaxIsWorking() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + 
prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startArray(FIELD).value("The Prodigy Firestarter").value("Firestarter").endArray().endObject() ) @@ -640,8 +631,7 @@ public void testThatDisablingPositionIncrementsWorkForStopwords() throws Excepti completionMappingBuilder.searchAnalyzer("classic").indexAnalyzer("classic").preservePositionIncrements(false); createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value("The Beatles").endArray().endObject().endObject() ) @@ -663,8 +653,7 @@ public void testThatUpgradeToMultiFieldsWorks() throws Exception { .endObject() .endObject(); assertAcked(prepareCreate(INDEX).setMapping(mapping)); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setRefreshPolicy(IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()) .get(); @@ -691,29 +680,31 @@ public void testThatUpgradeToMultiFieldsWorks() throws Exception { .get(); assertThat(putMappingResponse.isAcknowledged(), is(true)); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("suggs", SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10)) - ).get(); - assertSuggestions(searchResponse, "suggs"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion("suggs", SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10)) + ), + response -> assertSuggestions(response, "suggs") + ); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setRefreshPolicy(IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()) .get(); ensureGreen(INDEX); - SearchResponse afterReindexingResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("suggs", SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10)) - ).get(); - assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion("suggs", SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10)) + ), + response -> assertSuggestions(response, "suggs", "Foo Fighters") + ); } public void testThatFuzzySuggesterWorks() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject() ) @@ -721,22 +712,28 @@ public void testThatFuzzySuggesterWorks() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Nirv").size(10)) - ).get(); - assertSuggestions(searchResponse, false, "foo", "Nirvana"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Nirv").size(10)) + ), + response -> assertSuggestions(response, false, "foo", "Nirvana") + ); - searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Nirw", Fuzziness.ONE).size(10)) - ).get(); - assertSuggestions(searchResponse, false, "foo", "Nirvana"); + 
assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nirw", Fuzziness.ONE).size(10) + ) + ), + response -> assertSuggestions(response, false, "foo", "Nirvana") + ); } public void testThatFuzzySuggesterSupportsEditDistances() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject() ) @@ -745,23 +742,32 @@ public void testThatFuzzySuggesterSupportsEditDistances() throws Exception { refresh(); // edit distance 1 - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.ONE).size(10)) - ).get(); - assertSuggestions(searchResponse, false, "foo"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.ONE).size(10) + ) + ), + response -> assertSuggestions(response, false, "foo") + ); // edit distance 2 - searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.TWO).size(10)) - ).get(); - assertSuggestions(searchResponse, false, "foo", "Nirvana"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.TWO).size(10) + ) + ), + response -> assertSuggestions(response, false, "foo", "Nirvana") + ); } public void testThatFuzzySuggesterSupportsTranspositions() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject() ) @@ -769,25 +775,33 @@ public void testThatFuzzySuggesterSupportsTranspositions() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion( - "foo", - SuggestBuilders.completionSuggestion(FIELD).prefix("Nriv", FuzzyOptions.builder().setTranspositions(false).build()).size(10) - ) - ).get(); - assertSuggestions(searchResponse, false, "foo"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD) + .prefix("Nriv", FuzzyOptions.builder().setTranspositions(false).build()) + .size(10) + ) + ), + response -> assertSuggestions(response, false, "foo") + ); - searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("Nriv", Fuzziness.ONE).size(10)) - ).get(); - assertSuggestions(searchResponse, false, "foo", "Nirvana"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nriv", Fuzziness.ONE).size(10) + ) + ), + response -> assertSuggestions(response, false, "foo", "Nirvana") + ); } public void testThatFuzzySuggesterSupportsMinPrefixLength() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( 
jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject() ) @@ -795,28 +809,35 @@ public void testThatFuzzySuggesterSupportsMinPrefixLength() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion( - "foo", - SuggestBuilders.completionSuggestion(FIELD).prefix("Nriva", FuzzyOptions.builder().setFuzzyMinLength(6).build()).size(10) - ) - ).get(); - assertSuggestions(searchResponse, false, "foo"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD) + .prefix("Nriva", FuzzyOptions.builder().setFuzzyMinLength(6).build()) + .size(10) + ) + ), + response -> assertSuggestions(response, false, "foo") + ); - searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion( - "foo", - SuggestBuilders.completionSuggestion(FIELD).prefix("Nrivan", FuzzyOptions.builder().setFuzzyMinLength(6).build()).size(10) - ) - ).get(); - assertSuggestions(searchResponse, false, "foo", "Nirvana"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD) + .prefix("Nrivan", FuzzyOptions.builder().setFuzzyMinLength(6).build()) + .size(10) + ) + ), + response -> assertSuggestions(response, false, "foo", "Nirvana") + ); } public void testThatFuzzySuggesterSupportsNonPrefixLength() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject() ) @@ -824,28 +845,35 @@ public void testThatFuzzySuggesterSupportsNonPrefixLength() throws Exception { refresh(); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion( - "foo", - SuggestBuilders.completionSuggestion(FIELD).prefix("Nirw", FuzzyOptions.builder().setFuzzyPrefixLength(4).build()).size(10) - ) - ).get(); - assertSuggestions(searchResponse, false, "foo"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD) + .prefix("Nirw", FuzzyOptions.builder().setFuzzyPrefixLength(4).build()) + .size(10) + ) + ), + response -> assertSuggestions(response, false, "foo") + ); - searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion( - "foo", - SuggestBuilders.completionSuggestion(FIELD).prefix("Nirvo", FuzzyOptions.builder().setFuzzyPrefixLength(4).build()).size(10) - ) - ).get(); - assertSuggestions(searchResponse, false, "foo", "Nirvana"); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion( + "foo", + SuggestBuilders.completionSuggestion(FIELD) + .prefix("Nirvo", FuzzyOptions.builder().setFuzzyPrefixLength(4).build()) + .size(10) + ) + ), + response -> assertSuggestions(response, false, "foo", "Nirvana") + ); } public void testThatFuzzySuggesterIsUnicodeAware() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource(jsonBuilder().startObject().startObject(FIELD).startArray("input").value("ööööö").endArray().endObject().endObject()) .get(); @@ -857,23 +885,28 @@ public void testThatFuzzySuggesterIsUnicodeAware() throws Exception { .prefix("öööи", 
FuzzyOptions.builder().setUnicodeAware(true).build()) .size(10); - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)) - .get(); - assertSuggestions(searchResponse, false, "foo", "ööööö"); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)), + response -> assertSuggestions(response, false, "foo", "ööööö") + ); // removing unicode awareness leads to no result completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD) .prefix("öööи", FuzzyOptions.builder().setUnicodeAware(false).build()) .size(10); - searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)).get(); - assertSuggestions(searchResponse, false, "foo"); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)), + response -> assertSuggestions(response, false, "foo") + ); // increasing edit distance instead of unicode awareness works again, as this is only a single character completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD) .prefix("öööи", FuzzyOptions.builder().setUnicodeAware(false).setFuzziness(Fuzziness.TWO).build()) .size(10); - searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)).get(); - assertSuggestions(searchResponse, false, "foo", "ööööö"); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", completionSuggestionBuilder)), + response -> assertSuggestions(response, false, "foo", "ööööö") + ); } public void testThatStatsAreWorking() throws Exception { @@ -901,12 +934,10 @@ public void testThatStatsAreWorking() throws Exception { assertThat(putMappingResponse.isAcknowledged(), is(true)); // Index two entities - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").field(otherField, "WHATEVER").endObject()) .get(); - client().prepareIndex(INDEX) - .setId("2") + prepareIndex(INDEX).setId("2") .setSource(jsonBuilder().startObject().field(FIELD, "Bar Fighters").field(otherField, "WHATEVER2").endObject()) .get(); @@ -954,8 +985,7 @@ public void testThatStatsAreWorking() throws Exception { public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exception { createIndexAndMapping(completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value("Nirvana").endArray().endObject().endObject() ) @@ -984,8 +1014,7 @@ public void testThatSuggestStopFilterWorks() throws Exception { builder.indexAnalyzer("simple"); createIndexAndMappingAndSettings(settingsBuilder.build(), builder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -999,8 +1028,7 @@ public void testThatSuggestStopFilterWorks() throws Exception { .get(); // Higher weight so it's ranked first: - client().prepareIndex(INDEX) - .setId("2") + prepareIndex(INDEX).setId("2") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1032,8 +1060,7 @@ public void testThatIndexingInvalidFieldsInCompletionFieldResultsInException() t createIndexAndMapping(builder); try { - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") 
.setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1065,15 +1092,14 @@ public void testSkipDuplicates() throws Exception { weights[id] = Math.max(weight, weights[id]); String suggestion = "suggestion-" + String.format(Locale.ENGLISH, "%03d", id); indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setSource( - jsonBuilder().startObject() - .startObject(FIELD) - .field("input", suggestion) - .field("weight", weight) - .endObject() - .endObject() - ) + prepareIndex(INDEX).setSource( + jsonBuilder().startObject() + .startObject(FIELD) + .field("input", suggestion) + .field("weight", weight) + .endObject() + .endObject() + ) ); } indexRandom(true, indexRequestBuilders); @@ -1088,16 +1114,17 @@ .skipDuplicates(true) .size(numUnique); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion("suggestions", completionSuggestionBuilder) - ).get(); - assertSuggestions(searchResponse, true, "suggestions", expected); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("suggestions", completionSuggestionBuilder)), + response -> assertSuggestions(response, true, "suggestions", expected) + ); } public void assertSuggestions(String suggestionName, SuggestionBuilder<?> suggestBuilder, String... suggestions) { - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder)) - .get(); - assertSuggestions(searchResponse, suggestionName, suggestions); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder)), + response -> assertSuggestions(response, suggestionName, suggestions) + ); } public void assertSuggestions(String suggestion, String... suggestions) { @@ -1108,11 +1135,12 @@ public void assertSuggestions(String suggestion, String... suggestions) { public void assertSuggestionsNotInOrder(String suggestString, String... suggestions) { String suggestionName = RandomStrings.randomAsciiLettersOfLength(random(), 10); - SearchResponse searchResponse = prepareSearch(INDEX).suggest( - new SuggestBuilder().addSuggestion(suggestionName, SuggestBuilders.completionSuggestion(FIELD).text(suggestString).size(10)) - ).get(); - - assertSuggestions(searchResponse, false, suggestionName, suggestions); + assertResponse( + prepareSearch(INDEX).suggest( + new SuggestBuilder().addSuggestion(suggestionName, SuggestBuilders.completionSuggestion(FIELD).text(suggestString).size(10)) + ), + response -> assertSuggestions(response, false, suggestionName, suggestions) + ); } static void assertSuggestions(SearchResponse searchResponse, String name, String... suggestions) { @@ -1233,19 +1261,18 @@ private void createIndexAndMapping(CompletionMappingBuilder builder) throws IOEx public void testPrunedSegments() throws IOException { createIndexAndMappingAndSettings(indexSettings(1, 0).build(), completionMappingBuilder); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value("The Beatles").endArray().endObject().endObject() ) .get(); // we have 2 docs in a segment...
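// Sketch (assumption, not part of this patch): the prepareIndex(INDEX) shorthand
// adopted throughout these hunks is a convenience helper on the test base class
// that just drops the explicit client() indirection:
protected IndexRequestBuilder prepareIndex(String index) {
    return client().prepareIndex(index);   // same IndexRequestBuilder, shorter call site
}
// i.e. client().prepareIndex(INDEX).setId("1") and prepareIndex(INDEX).setId("1")
// build identical requests, which is all the surrounding rewrites rely on.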
- client().prepareIndex(INDEX).setId("2").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); + prepareIndex(INDEX).setId("2").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(actionGet); refresh(); // update the first one and then merge.. the target segment will have no value in FIELD - client().prepareIndex(INDEX).setId("1").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); + prepareIndex(INDEX).setId("1").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); actionGet = indicesAdmin().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(actionGet); refresh(); @@ -1278,8 +1305,7 @@ public void testVeryLongInput() throws IOException { ); // can cause stack overflow without the default max_input_length String longString = replaceReservedChars(randomRealisticUnicodeOfLength(randomIntBetween(5000, 10000)), (char) 0x01); - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject().startObject(FIELD).startArray("input").value(longString).endArray().endObject().endObject() ) @@ -1308,8 +1334,7 @@ public void testReservedChars() throws IOException { String string = "foo" + (char) 0x00 + "bar"; Exception e = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex(INDEX) - .setId("1") + () -> prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1342,8 +1367,7 @@ public void testIssue5930() throws IOException { ) ); String string = "foo bar"; - client().prepareIndex(INDEX) - .setId("1") + prepareIndex(INDEX).setId("1") .setSource(jsonBuilder().startObject().field(FIELD, string).endObject()) .setRefreshPolicy(IMMEDIATE) .get(); @@ -1364,8 +1388,7 @@ public void testMultiDocSuggestions() throws Exception { List indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1399,9 +1422,9 @@ public void testSuggestWithFieldAlias() throws Exception { assertAcked(prepareCreate(INDEX).setMapping(mapping)); List builders = new ArrayList<>(); - builders.add(client().prepareIndex(INDEX).setSource(FIELD, "apple")); - builders.add(client().prepareIndex(INDEX).setSource(FIELD, "mango")); - builders.add(client().prepareIndex(INDEX).setSource(FIELD, "papaya")); + builders.add(prepareIndex(INDEX).setSource(FIELD, "apple")); + builders.add(prepareIndex(INDEX).setSource(FIELD, "mango")); + builders.add(prepareIndex(INDEX).setSource(FIELD, "papaya")); indexRandom(true, false, builders); CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion("alias").text("app"); @@ -1415,8 +1438,7 @@ public void testSuggestOnlyExplain() throws Exception { List indexRequestBuilders = new ArrayList<>(); for (int i = 1; i <= numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1429,10 +1451,10 @@ public void testSuggestOnlyExplain() throws Exception { } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = 
SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); - SearchResponse searchResponse = prepareSearch(INDEX).setExplain(true) - .suggest(new SuggestBuilder().addSuggestion("foo", prefix)) - .get(); - assertSuggestions(searchResponse, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6"); + assertResponse( + prepareSearch(INDEX).setExplain(true).suggest(new SuggestBuilder().addSuggestion("foo", prefix)), + response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") + ); } public void testCompletionWithCollapse() throws Exception { @@ -1459,26 +1481,29 @@ public void testCompletionWithCollapse() throws Exception { XContentBuilder builder = jsonBuilder().startObject(); builder.startObject(suggestField).field("input", "suggestion" + i).field("weight", i).endObject(); builder.field("collapse_field", "collapse me").endObject(); // all docs the same value for collapsing - client().prepareIndex(index).setId("" + i).setSource(builder).get(); + prepareIndex(index).setId("" + i).setSource(builder).get(); } indicesAdmin().prepareRefresh(index).get(); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(suggestField).prefix("sug").size(1); - SearchResponse searchResponse = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) - .setFrom(1) - .setSize(1) - .setCollapse(new CollapseBuilder("collapse_field")) - .suggest(new SuggestBuilder().addSuggestion("the_suggestion", prefix)) - .get(); - assertAllSuccessful(searchResponse); - - assertThat(searchResponse.getSuggest().getSuggestion("the_suggestion"), is(notNullValue())); - Suggest.Suggestion> suggestion = searchResponse.getSuggest() - .getSuggestion("the_suggestion"); - - List suggestionList = getNames(suggestion.getEntries().get(0)); - assertThat(suggestionList, contains("suggestion" + (numDocs - 1))); - assertEquals(0, searchResponse.getHits().getHits().length); + assertResponse( + prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()) + .setFrom(1) + .setSize(1) + .setCollapse(new CollapseBuilder("collapse_field")) + .suggest(new SuggestBuilder().addSuggestion("the_suggestion", prefix)), + response -> { + assertAllSuccessful(response); + + assertThat(response.getSuggest().getSuggestion("the_suggestion"), is(notNullValue())); + Suggest.Suggestion> suggestion = response.getSuggest() + .getSuggestion("the_suggestion"); + + List suggestionList = getNames(suggestion.getEntries().get(0)); + assertThat(suggestionList, contains("suggestion" + (numDocs - 1))); + assertEquals(0, response.getHits().getHits().length); + } + ); } public static boolean isReservedChar(char c) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index a526781bcc3db..22d48c2f282d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -12,7 +12,6 @@ import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.Fuzziness; @@ -41,6 +40,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.core.IsEqual.equalTo; @@ -77,7 +77,7 @@ public void testContextPrefix() throws Exception { source.field("type", "type" + i % 3); } source.endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD) @@ -113,7 +113,7 @@ public void testContextRegex() throws Exception { source.field("type", "type" + i % 3); } source.endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD) @@ -149,7 +149,7 @@ public void testContextFuzzy() throws Exception { source.field("type", "type" + i % 3); } source.endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD) @@ -168,8 +168,7 @@ public void testContextFilteringWorksWithUTF8Categories() throws Exception { LinkedHashMap<String, ContextMapping<?>> map = new LinkedHashMap<>(Collections.singletonMap("cat", contextMapping)); final CompletionMappingBuilder mapping = new CompletionMappingBuilder().context(map); createIndexAndMapping(mapping); - DocWriteResponse indexResponse = client().prepareIndex(INDEX) - .setId("1") + DocWriteResponse indexResponse = prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -198,8 +197,7 @@ public void testSingleContextFiltering() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -230,8 +228,7 @@ public void testSingleContextBoosting() throws Exception { List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { indexRequestBuilders.add( - client().prepareIndex(INDEX) - .setId("" + i) + prepareIndex(INDEX).setId("" + i) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -275,7 +272,7 @@ public void testMultiContextFiltering() throws Exception { .field("cat", "cat" + i % 2) .field("type", "type" + i % 4) .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -317,7 +314,7 @@ public void testMultiContextBoosting() throws Exception { .field("cat", "cat" + i % 2) .field("type", "type" + i % 4) .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" +
i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -390,7 +387,7 @@ public void testSeveralContexts() throws Exception { source.field("type" + c, "type" + c + i % 4); } source.endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -423,7 +420,7 @@ public void testGeoFiltering() throws Exception { .endObject() .endObject() .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -457,7 +454,7 @@ public void testGeoBoosting() throws Exception { .endObject() .endObject() .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -490,7 +487,7 @@ public void testGeoPointContext() throws Exception { .endObject() .endObject() .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD) @@ -532,7 +529,7 @@ public void testGeoNeighbours() throws Exception { .endObject() .endObject() .endObject(); - indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } indexRandom(true, indexRequestBuilders); @@ -595,7 +592,7 @@ public void testGeoField() throws Exception { .array("input", "Hotel Amsterdam in Berlin") .endObject() .endObject(); - client().prepareIndex(INDEX).setId("1").setSource(source1).get(); + prepareIndex(INDEX).setId("1").setSource(source1).get(); XContentBuilder source2 = jsonBuilder().startObject() .startObject("location") @@ -605,7 +602,7 @@ public void testGeoField() throws Exception { .array("input", "Hotel Berlin in Amsterdam") .endObject() .endObject(); - client().prepareIndex(INDEX).setId("2").setSource(source2).get(); + prepareIndex(INDEX).setId("2").setSource(source2).get(); refresh(); @@ -619,13 +616,13 @@ public void testGeoField() throws Exception { Collections.singletonList(GeoQueryContext.builder().setGeoPoint(new GeoPoint(52.52, 13.4)).build()) ) ); - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, context)).get(); - - assertEquals(searchResponse.getSuggest().size(), 1); - assertEquals( - "Hotel Amsterdam in Berlin", - searchResponse.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string() - ); + assertResponse(prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, context)), response -> { + assertEquals(response.getSuggest().size(), 1); + assertEquals( + "Hotel Amsterdam in Berlin", + response.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string() + ); + }); } public void testSkipDuplicatesWithContexts() throws Exception { @@ -647,7 +644,7 @@ public void testSkipDuplicatesWithContexts() throws Exception { .field("cat", "cat" + id % 2) .field("type", "type" + id) .endObject(); - 
indexRequestBuilders.add(client().prepareIndex(INDEX).setId("" + i).setSource(source)); + indexRequestBuilders.add(prepareIndex(INDEX).setId("" + i).setSource(source)); } String[] expected = new String[numUnique]; for (int i = 0; i < numUnique; i++) { @@ -669,9 +666,10 @@ } public void assertSuggestions(String suggestionName, SuggestionBuilder<?> suggestBuilder, String... suggestions) { - SearchResponse searchResponse = prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder)) - .get(); - CompletionSuggestSearchIT.assertSuggestions(searchResponse, suggestionName, suggestions); + assertResponse( + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion(suggestionName, suggestBuilder)), + response -> CompletionSuggestSearchIT.assertSuggestions(response, suggestionName, suggestions) + ); } private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index 95eb0f055b830..bade23e193e75 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; @@ -53,7 +52,9 @@ import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestionPhraseCollateMatchExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestionSize; @@ -245,8 +246,7 @@ public void testSizeOneShard() throws Exception { } refresh(); - SearchResponse search = prepareSearch().setQuery(matchQuery("text", "spellchecker")).get(); - assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); + assertResponse( + prepareSearch().setQuery(matchQuery("text", "spellchecker")), + response -> assertThat("didn't ask for suggestions but got some", response.getSuggest(), nullValue()) + ); TermSuggestionBuilder termSuggestion = termSuggestion("text").suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can // vary between requests.
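// Sketch (assumption, not part of this patch): the approximate shape of the
// ElasticsearchAssertions.assertResponse helper that these hunks migrate to; the
// real signature may differ. The point is that the SearchResponse is consumed
// inside a callback and always released afterwards:
static void assertResponse(SearchRequestBuilder builder, Consumer<SearchResponse> consumer) {
    SearchResponse response = builder.get();   // execute the prepared search
    try {
        consumer.accept(response);             // run the caller's assertions
    } finally {
        response.decRef();                     // release even if an assertion fails
    }
}
// Values needed outside the lambda are captured via one-element arrays, as in the
// searchSuggest(...) rewrite further down (Suggest[] suggest = new Suggest[1]).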
@@ -292,9 +295,9 @@ public void testUnmappedField() throws IOException, InterruptedException, Execut indexRandom( true, - client().prepareIndex("test").setSource("name", "I like iced tea"), - client().prepareIndex("test").setSource("name", "I like tea."), - client().prepareIndex("test").setSource("name", "I like ice cream.") + prepareIndex("test").setSource("name", "I like iced tea"), + prepareIndex("test").setSource("name", "I like tea."), + prepareIndex("test").setSource("name", "I like ice cream.") ); refresh(); @@ -329,8 +332,10 @@ public void testSimple() throws Exception { indexDoc("test", "4", "text", "abcc"); refresh(); - SearchResponse search = prepareSearch().setQuery(matchQuery("text", "spellcecker")).get(); - assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); + assertResponse( + prepareSearch().setQuery(matchQuery("text", "spellcecker")), + response -> assertThat("didn't ask for suggestions but got some", response.getSuggest(), nullValue()) + ); TermSuggestionBuilder termSuggest = termSuggestion("text").suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can vary // between requests. @@ -776,9 +781,9 @@ public void testDifferentShardSize() throws Exception { ensureGreen(); indexRandom( true, - client().prepareIndex("test").setId("1").setSource("field1", "foobar1").setRouting("1"), - client().prepareIndex("test").setId("2").setSource("field1", "foobar2").setRouting("2"), - client().prepareIndex("test").setId("3").setSource("field1", "foobar3").setRouting("3") + prepareIndex("test").setId("1").setSource("field1", "foobar1").setRouting("1"), + prepareIndex("test").setId("2").setSource("field1", "foobar2").setRouting("2"), + prepareIndex("test").setId("3").setSource("field1", "foobar3").setRouting("3") ); Suggest suggest = searchSuggest( @@ -836,14 +841,14 @@ public void testShardFailures() throws IOException, InterruptedException { assertRequestBuilderThrows(request, SearchPhaseExecutionException.class); // When searching on a shard which does not hold yet any document of an existing type, we should not fail - SearchResponse searchResponse = prepareSearch().setSize(0) - .suggest( - new SuggestBuilder().setGlobalText("tetsting sugestion") - .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) - ) - .get(); - ElasticsearchAssertions.assertNoFailures(searchResponse); - ElasticsearchAssertions.assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions"); + assertNoFailuresAndResponse( + prepareSearch().setSize(0) + .suggest( + new SuggestBuilder().setGlobalText("tetsting sugestion") + .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) + ), + response -> assertSuggestion(response.getSuggest(), 0, 0, "did_you_mean", "testing suggestions") + ); } // see #3469 @@ -876,17 +881,19 @@ public void testEmptyShards() throws IOException, InterruptedException { ensureGreen(); // test phrase suggestion on completely empty index - SearchResponse searchResponse = prepareSearch().setSize(0) - .suggest( - new SuggestBuilder().setGlobalText("tetsting sugestion") - .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) - ) - .get(); - - assertNoFailures(searchResponse); - Suggest suggest = searchResponse.getSuggest(); - assertSuggestionSize(suggest, 0, 0, "did_you_mean"); - assertThat(suggest.getSuggestion("did_you_mean").getEntries().get(0).getText().string(), equalTo("tetsting sugestion")); + assertNoFailuresAndResponse( + prepareSearch().setSize(0) + .suggest( + 
new SuggestBuilder().setGlobalText("tetsting sugestion") + .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) + ), + response -> { + assertNoFailures(response); + Suggest suggest = response.getSuggest(); + assertSuggestionSize(suggest, 0, 0, "did_you_mean"); + assertThat(suggest.getSuggestion("did_you_mean").getEntries().get(0).getText().string(), equalTo("tetsting sugestion")); + } + ); indexDoc("test", "11", "foo", "bar"); indexDoc("test", "12", "foo", "bar"); @@ -894,33 +901,34 @@ public void testEmptyShards() throws IOException, InterruptedException { refresh(); // test phrase suggestion but nothing matches - searchResponse = prepareSearch().setSize(0) - .suggest( - new SuggestBuilder().setGlobalText("tetsting sugestion") - .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) - ) - .get(); - - assertNoFailures(searchResponse); - suggest = searchResponse.getSuggest(); - assertSuggestionSize(suggest, 0, 0, "did_you_mean"); - assertThat(suggest.getSuggestion("did_you_mean").getEntries().get(0).getText().string(), equalTo("tetsting sugestion")); - + assertNoFailuresAndResponse( + prepareSearch().setSize(0) + .suggest( + new SuggestBuilder().setGlobalText("tetsting sugestion") + .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) + ), + response -> { + Suggest suggest = response.getSuggest(); + assertSuggestionSize(suggest, 0, 0, "did_you_mean"); + assertThat(suggest.getSuggestion("did_you_mean").getEntries().get(0).getText().string(), equalTo("tetsting sugestion")); + } + ); // finally indexing a document that will produce some meaningful suggestion indexDoc("test", "1", "name", "Just testing the suggestions api"); refresh(); - searchResponse = prepareSearch().setSize(0) - .suggest( - new SuggestBuilder().setGlobalText("tetsting sugestion") - .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) - ) - .get(); - - assertNoFailures(searchResponse); - suggest = searchResponse.getSuggest(); - assertSuggestionSize(suggest, 0, 3, "did_you_mean"); - assertSuggestion(suggest, 0, 0, "did_you_mean", "testing suggestions"); + assertNoFailuresAndResponse( + prepareSearch().setSize(0) + .suggest( + new SuggestBuilder().setGlobalText("tetsting sugestion") + .addSuggestion("did_you_mean", phraseSuggestion("name").maxErrors(5.0f)) + ), + response -> { + Suggest suggest = response.getSuggest(); + assertSuggestionSize(suggest, 0, 3, "did_you_mean"); + assertSuggestion(suggest, 0, 0, "did_you_mean", "testing suggestions"); + } + ); } /** @@ -1110,7 +1118,7 @@ public void testSuggestWithManyCandidates() throws InterruptedException, Executi List<IndexRequestBuilder> builders = new ArrayList<>(); for (String title : titles) { - builders.add(client().prepareIndex("test").setSource("title", title)); + builders.add(prepareIndex("test").setSource("title", title)); } indexRandom(true, builders); @@ -1148,9 +1156,9 @@ public void testSuggestWithFieldAlias() throws Exception { assertAcked(prepareCreate("test").setMapping(mapping)); List<IndexRequestBuilder> builders = new ArrayList<>(); - builders.add(client().prepareIndex("test").setSource("text", "apple")); - builders.add(client().prepareIndex("test").setSource("text", "mango")); - builders.add(client().prepareIndex("test").setSource("text", "papaya")); + builders.add(prepareIndex("test").setSource("text", "apple")); + builders.add(prepareIndex("test").setSource("text", "mango")); + builders.add(prepareIndex("test").setSource("text", "papaya")); indexRandom(true, false, builders); TermSuggestionBuilder termSuggest =
termSuggestion("alias").text("appple"); @@ -1173,10 +1181,10 @@ public void testPhraseSuggestMinDocFreq() throws Exception { assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).build()).setMapping(mapping)); List builders = new ArrayList<>(); - builders.add(client().prepareIndex("test").setSource("text", "apple")); - builders.add(client().prepareIndex("test").setSource("text", "apple")); - builders.add(client().prepareIndex("test").setSource("text", "apple")); - builders.add(client().prepareIndex("test").setSource("text", "appfle")); + builders.add(prepareIndex("test").setSource("text", "apple")); + builders.add(prepareIndex("test").setSource("text", "apple")); + builders.add(prepareIndex("test").setSource("text", "apple")); + builders.add(prepareIndex("test").setSource("text", "appfle")); indexRandom(true, false, builders); PhraseSuggestionBuilder phraseSuggest = phraseSuggestion("text").text("appple") @@ -1286,7 +1294,7 @@ public void testPhraseSuggesterCollate() throws InterruptedException, ExecutionE List builders = new ArrayList<>(); for (String title : titles) { - builders.add(client().prepareIndex("test").setSource("title", title)); + builders.add(prepareIndex("test").setSource("title", title)); } indexRandom(true, builders); @@ -1420,8 +1428,11 @@ protected Suggest searchSuggest(String suggestText, int expectShardsFailed, Map< suggestBuilder.addSuggestion(suggestion.getKey(), suggestion.getValue()); } builder.suggest(suggestBuilder); - SearchResponse actionGet = builder.get(); - assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(expectShardsFailed)); - return actionGet.getSuggest(); + Suggest[] suggest = new Suggest[1]; + assertResponse(builder, response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(expectShardsFailed)); + suggest[0] = response.getSuggest(); + }); + return suggest[0]; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/similarity/SimilarityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/similarity/SimilarityIT.java index 5c1f925bddc49..55dca7810f845 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/similarity/SimilarityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/similarity/SimilarityIT.java @@ -8,11 +8,11 @@ package org.elasticsearch.similarity; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.test.ESIntegTestCase; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -20,7 +20,7 @@ public class SimilarityIT extends ESIntegTestCase { public void testCustomBM25Similarity() throws Exception { try { - indicesAdmin().prepareDelete("test").execute().actionGet(); + indicesAdmin().prepareDelete("test").get(); } catch (Exception e) { // ignore } @@ -45,24 +45,21 @@ public void testCustomBM25Similarity() throws Exception { .setSettings( indexSettings(1, 0).put("similarity.custom.type", "BM25").put("similarity.custom.k1", 2.0f).put("similarity.custom.b", 0.5f) ) - .execute() - .actionGet(); + .get(); - client().prepareIndex("test") - .setId("1") + prepareIndex("test").setId("1") 
.setSource("field1", "the quick brown fox jumped over the lazy dog", "field2", "the quick brown fox jumped over the lazy dog") .setRefreshPolicy(IMMEDIATE) - .execute() - .actionGet(); + .get(); - SearchResponse bm25SearchResponse = prepareSearch().setQuery(matchQuery("field1", "quick brown fox")).execute().actionGet(); - assertThat(bm25SearchResponse.getHits().getTotalHits().value, equalTo(1L)); - float bm25Score = bm25SearchResponse.getHits().getHits()[0].getScore(); - - SearchResponse booleanSearchResponse = prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet(); - assertThat(booleanSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - float defaultScore = booleanSearchResponse.getHits().getHits()[0].getScore(); - - assertThat(bm25Score, not(equalTo(defaultScore))); + assertResponse(prepareSearch().setQuery(matchQuery("field1", "quick brown fox")), bm25SearchResponse -> { + assertThat(bm25SearchResponse.getHits().getTotalHits().value, equalTo(1L)); + float bm25Score = bm25SearchResponse.getHits().getHits()[0].getScore(); + assertResponse(prepareSearch().setQuery(matchQuery("field2", "quick brown fox")), booleanSearchResponse -> { + assertThat(booleanSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + float defaultScore = booleanSearchResponse.getHits().getHits()[0].getScore(); + assertThat(bm25Score, not(equalTo(defaultScore))); + }); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index ca522064e3d04..42c19a903b452 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.RemoteTransportException; import java.io.IOException; import java.nio.file.Files; @@ -768,7 +769,18 @@ public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception ensureStableCluster(3); awaitNoMoreRunningOperations(); - expectThrows(RepositoryException.class, deleteFuture::actionGet); + var innerException = expectThrows(ExecutionException.class, RuntimeException.class, deleteFuture::get); + + // There may be many layers of RTE to unwrap here, see https://github.com/elastic/elasticsearch/issues/102351. 
+ // ExceptionsHelper#unwrapCause gives up at 10 layers of wrapping so we must unwrap more tenaciously by hand here: + while (true) { + if (innerException instanceof RemoteTransportException remoteTransportException) { + innerException = asInstanceOf(RuntimeException.class, remoteTransportException.getCause()); + } else { + assertThat(innerException, instanceOf(RepositoryException.class)); + break; + } + } } public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws Exception { @@ -948,7 +960,7 @@ public void testQueuedSnapshotsWaitingForShardReady() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth(testIndex).execute().actionGet().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth(testIndex).get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); @@ -1304,7 +1316,7 @@ public void testConcurrentOperationsLimit() throws Exception { final ConcurrentSnapshotExecutionException cse = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail").execute().actionGet() + () -> clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail").get() ); assertThat( cse.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index abb72286f971f..e7bc6f13383d1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -65,7 +65,7 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception { createIndex("test-idx-1"); logger.info("--> indexing some data"); - indexRandom(true, client().prepareIndex("test-idx-1").setSource("foo", "bar")); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar")); final String snapshot = "test-snap"; @@ -111,11 +111,7 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); final String snapshot = "test-snap"; @@ -186,11 +182,7 @@ public void testFindDanglingLatestGeneration() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); final String snapshot = "test-snap"; @@ -555,11 +547,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio final String[] indices = { "test-idx-1", "test-idx-2" }; createIndex(indices); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), 
prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -608,11 +596,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -655,11 +639,7 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -714,9 +694,9 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { createIndex("test-idx-1", "test-idx-2"); indexRandom( true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") + prepareIndex("test-idx-1").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar") ); flushAndRefresh("test-idx-1", "test-idx-2"); @@ -760,11 +740,7 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); - indexRandom( - true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") - ); + indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); @@ -821,14 +797,14 @@ private void assertRepositoryBlocked(String repo, String existingSnapshot) { logger.info("--> try to delete snapshot"); final RepositoryException ex = expectThrows( RepositoryException.class, - () -> clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot).execute().actionGet() + () -> clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot).get() ); assertThat(ex.getMessage(), containsString("concurrent modification of the index-N file")); logger.info("--> try to create snapshot"); final RepositoryException ex2 = expectThrows( RepositoryException.class, - () -> clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot).execute().actionGet() + () -> clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot).get() ); assertThat(ex2.getMessage(), containsString("The repository has been disabled to prevent data corruption")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index 030ea42d53f13..3a72ab792f571 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -62,8 +62,7 @@ public void testShouldNotRestoreRepositoryMetadata() { .setRestoreGlobalState(true) .setIndices("-*") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); logger.info("make sure old repository wasn't restored"); assertRequestBuilderThrows(clusterAdmin().prepareGetRepositories("test-repo-1"), RepositoryMissingException.class); @@ -104,8 +103,7 @@ public void testShouldRestoreOnlySnapshotMetadata() throws Exception { .setRestoreGlobalState(true) .setIndices("-*") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); logger.info("check that custom persistent metadata [{}] is correctly restored", metadata); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index e188c11125c42..59fc54347d1d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -92,6 +92,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; @@ -104,6 +105,7 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { @@ -194,18 +196,30 @@ public void testSnapshotWithStuckNode() throws Exception { } logger.info("--> making sure that snapshot no longer exists"); - expectThrows( - SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet() - ); + expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()); - logger.info("--> Go through a loop of creating and deleting a snapshot to trigger repository cleanup"); + logger.info("--> trigger repository cleanup"); clusterAdmin().prepareCleanupRepository("test-repo").get(); - // Expect two files to remain in the repository: - // (1) index-(N+1) - // (2) index-latest - assertFileCount(repo, 2); + // Expect two or three files to remain in the repository: + // (1) index-latest + // (2) index-(N+1) + // (3) index-N (maybe: a fully successful deletion removes this, but cleanup does not, see #100718) + + final var blobPaths = getAllFilesInDirectoryAndDescendants(repo); + final var blobPathsString = blobPaths.toString(); + assertTrue(blobPathsString, blobPaths.remove(repo.resolve(BlobStoreRepository.INDEX_LATEST_BLOB))); + assertThat(blobPathsString, blobPaths, anyOf(hasSize(1), hasSize(2))); + final var repoGenerations = blobPaths.stream().mapToLong(blobPath -> { + final var blobName = 
repo.relativize(blobPath).toString(); + assertThat(blobPathsString, blobName, startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)); + return Long.parseLong(blobName.substring(BlobStoreRepository.INDEX_FILE_PREFIX.length())); + }).toArray(); + + if (repoGenerations.length == 2) { + assertEquals(blobPathsString, 1, Math.abs(repoGenerations[0] - repoGenerations[1])); + } + logger.info("--> done"); } @@ -223,13 +237,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> shutdown one of the nodes"); internalCluster().stopRandomDataNode(); assertThat( - clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setTimeout("1m") - .setWaitForNodes("<2") - .execute() - .actionGet() - .isTimedOut(), + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("<2").get().isTimedOut(), equalTo(false) ); @@ -258,8 +266,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { () -> clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) - .execute() - .actionGet() + .get() ); assertThat(sne.getMessage(), containsString("Indices don't have primary shards")); @@ -269,8 +276,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(false) .setPartial(true) - .execute() - .actionGet(); + .get(); assertBusy(() -> { SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap-2") @@ -304,8 +310,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) .setPartial(true) - .execute() - .actionGet(); + .get(); logger.info( "State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), @@ -333,8 +338,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { .setRestoreGlobalState(false) .setIndices("test-idx-all") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6)); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6)); @@ -374,8 +378,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { .setRestoreGlobalState(false) .setIndices("test-idx-closed") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(4)); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(4)); @@ -413,13 +416,7 @@ public boolean clearData(String nodeName) { }); assertThat( - clusterAdmin().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setTimeout("1m") - .setWaitForNodes("2") - .execute() - .actionGet() - .isTimedOut(), + clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("2").get().isTimedOut(), equalTo(false) ); @@ -704,7 +701,7 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("test", "init").execute().actionGet(); + 
prepareIndex(indexName).setSource("test", "init").get(); } final Path repoPath = randomRepoPath(); @@ -737,7 +734,7 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { // add few docs - less than initially docs = between(1, 5); for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("test", "test" + i).execute().actionGet(); + prepareIndex(indexName).setSource("test", "test" + i).get(); } // create another snapshot @@ -789,7 +786,7 @@ public void testDeduplicateIndexMetadata() throws Exception { int docs = between(10, 100); for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("test", "init").execute().actionGet(); + prepareIndex(indexName).setSource("test", "init").get(); } final Path repoPath = randomRepoPath(); @@ -801,7 +798,7 @@ docs = between(1, 5); for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("test", "test" + i).execute().actionGet(); + prepareIndex(indexName).setSource("test", "test" + i).get(); } logger.info("--> restart random data node and add new data node to change index allocation"); @@ -818,7 +815,7 @@ // index to some other field to trigger a change in index metadata for (int i = 0; i < docs; i++) { - client().prepareIndex(indexName).setSource("new_field", "test" + i).execute().actionGet(); + prepareIndex(indexName).setSource("new_field", "test" + i).get(); } createFullSnapshot(repositoryName, snapshot2); @@ -961,7 +958,7 @@ public void testRetentionLeasesClearedOnRestore() throws Exception { logger.debug("--> indexing {} docs into {}", snapshotDocCount, indexName); IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[snapshotDocCount]; for (int i = 0; i < snapshotDocCount; i++) { - indexRequestBuilders[i] = client().prepareIndex(indexName).setSource("field", "value"); + indexRequestBuilders[i] = prepareIndex(indexName).setSource("field", "value"); } indexRandom(true, indexRequestBuilders); assertDocCount(indexName, snapshotDocCount); @@ -986,7 +983,7 @@ logger.debug("--> indexing {} extra docs into {}", extraDocCount, indexName); indexRequestBuilders = new IndexRequestBuilder[extraDocCount]; for (int i = 0; i < extraDocCount; i++) { - indexRequestBuilders[i] = client().prepareIndex(indexName).setSource("field", "value"); + indexRequestBuilders[i] = prepareIndex(indexName).setSource("field", "value"); } indexRandom(true, indexRequestBuilders); } @@ -1108,7 +1105,7 @@ public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth(indexName).execute().actionGet().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth(indexName).get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); @@ -1166,7 +1163,7 @@ public void testDeleteIndexDuringSnapshot() throws Exception { final int concurrentLoops = randomIntBetween(2, 5); final List<PlainActionFuture<Void>> futures = new ArrayList<>(concurrentLoops); for (int i = 0; i < concurrentLoops; i++) { - final PlainActionFuture<Void> future = PlainActionFuture.newFuture(); + final PlainActionFuture<Void> future = new PlainActionFuture<>(); futures.add(future); startSnapshotDeleteLoop(repoName, indexName, "test-snap-" + i, future); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java index 32a1d6724e0fd..1f86d4cb39ea4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java @@ -48,7 +48,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { } /** Check that the reset method cleans up a feature */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/97780") public void testResetSystemIndices() throws Exception { String systemIndex1 = ".test-system-idx-1"; String systemIndex2 = ".second-test-system-idx-1"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index d171dd2c89c78..3b129455d4eef 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -56,11 +56,11 @@ public void testWhenMetadataAreLoaded() throws Exception { createIndex("docs"); indexRandom( true, - client().prepareIndex("docs").setId("1").setSource("rank", 1), - client().prepareIndex("docs").setId("2").setSource("rank", 2), - client().prepareIndex("docs").setId("3").setSource("rank", 3), - client().prepareIndex("others").setSource("rank", 4), - client().prepareIndex("others").setSource("rank", 5) + prepareIndex("docs").setId("1").setSource("rank", 1), + prepareIndex("docs").setId("2").setSource("rank", 2), + prepareIndex("docs").setId("3").setSource("rank", 3), + prepareIndex("others").setSource("rank", 4), + prepareIndex("others").setSource("rank", 5) ); createRepository("repository", CountingMockRepositoryPlugin.TYPE); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index 0a2f00b6e0949..e6bae861e1d04 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -82,7 +82,7 @@ public Path nodeConfigPath(int nodeOrdinal) { @After public void stopSecondCluster() throws IOException { - IOUtils.close(secondCluster); + IOUtils.close(secondCluster::close); } @Override @@ -127,7 +127,7 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { final SnapshotException sne = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true).execute().actionGet() + () -> clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true).get() ); assertThat(sne.getMessage(), containsString("failed to update snapshot in repository")); final RepositoryException cause = (RepositoryException) sne.getCause(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java index 429c5c1a136c2..0f0858982b4ad 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java @@ -61,8 +61,7 @@ public void testRepositoryThrottlingStats() throws Exception { .setRenamePattern("test-") .setRenameReplacement("test2-") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index cd34f68471156..20313767c0677 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -309,8 +309,7 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that old mapping is restored"); @@ -319,7 +318,7 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { assertThat(mappings.sourceAsMap().toString(), not(containsString("foo"))); logger.info("--> assert that old settings are restored"); - GetSettingsResponse getSettingsResponse = indicesAdmin().prepareGetSettings("test-idx").execute().actionGet(); + GetSettingsResponse getSettingsResponse = indicesAdmin().prepareGetSettings("test-idx").get(); assertThat(getSettingsResponse.getSetting("test-idx", "index.refresh_interval"), equalTo("10s")); } @@ -352,8 +351,7 @@ public void testRestoreAliases() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(true) - .execute() - .actionGet(); + .get(); // We don't restore any indices here assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -379,8 +377,7 @@ public void testRestoreAliases() throws Exception { .setWaitForCompletion(true) .setRestoreGlobalState(true) .setIncludeAliases(false) - .execute() - .actionGet(); + .get(); // We don't restore any indices here assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -431,8 +428,7 @@ public void testRestoreTemplates() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(true) - .execute() - .actionGet(); + .get(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); @@ -470,8 +466,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx-1-copy", 100L); @@ -487,8 +482,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) - .execute() - 
.actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx-1-copy", 100L); @@ -504,8 +498,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("(.+-2)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> delete indices"); @@ -519,8 +512,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("(.+)") .setRenameReplacement("same-name") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (SnapshotRestoreException ex) { // Expected @@ -534,8 +526,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("test-idx-2") .setRenameReplacement("test-idx-1") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (SnapshotRestoreException ex) { // Expected @@ -550,8 +541,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern(".+") .setRenameReplacement("__WRONG__") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (InvalidIndexNameException ex) { // Expected @@ -566,8 +556,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern(".+") .setRenameReplacement("alias-3") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (InvalidIndexNameException ex) { // Expected @@ -582,8 +571,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("test-idx") .setRenameReplacement("alias") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (SnapshotRestoreException ex) { // Expected @@ -598,8 +586,7 @@ public void testRenameOnRestore() throws Exception { .setRenamePattern("test-idx-1") .setRenameReplacement("alias-2") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); fail("Shouldn't be here"); } catch (SnapshotRestoreException ex) { // Expected @@ -614,8 +601,7 @@ public void testRenameOnRestore() throws Exception { .setRenameReplacement("alias") .setWaitForCompletion(true) .setIncludeAliases(false) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); } @@ -683,7 +669,7 @@ public void testChangeSettingsOnRestore() throws Exception { final int numdocs = randomIntBetween(10, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; for (int i = 0; i < builders.length; i++) { - builders[i] = client().prepareIndex("test-idx").setId(Integer.toString(i)).setSource("field1", "Foo bar " + i); + builders[i] = prepareIndex("test-idx").setId(Integer.toString(i)).setSource("field1", "Foo bar " + i); } indexRandom(true, builders); flushAndRefresh(); @@ -740,12 +726,11 @@ public void testChangeSettingsOnRestore() throws Exception { .setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIndexSettings) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that correct settings are restored"); - GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet(); + GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").get(); 
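+ // the wildcard ignore wiped every removable setting, so only the explicit override (the refresh interval) and immutable settings such as the shard count should remain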
assertThat(getSettingsResponse.getSetting("test-idx", INDEX_REFRESH_INTERVAL_SETTING.getKey()), equalTo("5s")); // Make sure that number of shards didn't change assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards)); @@ -764,12 +749,11 @@ public void testChangeSettingsOnRestore() throws Exception { .setIgnoreIndexSettings("*") // delete everything we can delete .setIndexSettings(newIndexSettings) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that correct settings are restored and index is still functional"); - getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet(); + getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").get(); assertThat(getSettingsResponse.getSetting("test-idx", INDEX_REFRESH_INTERVAL_SETTING.getKey()), equalTo("5s")); // Make sure that number of shards didn't change assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards)); @@ -839,8 +823,7 @@ public void testRecreateBlocksOnRestore() throws Exception { .prepareRestoreSnapshot("test-repo", "test-snap") .setIndexSettings(changedSettings) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ClusterBlocks blocks = client.admin().cluster().prepareState().clear().setBlocks(true).get().getState().blocks(); @@ -906,7 +889,7 @@ public void testFailOnAncientVersion() throws Exception { final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion); final SnapshotRestoreException snapshotRestoreException = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot).execute().actionGet() + () -> clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot).get() ); assertThat( snapshotRestoreException.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 7f5cacdfc935a..abc2bf8fb7219 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -188,8 +188,7 @@ public void testBasicWorkFlow() throws Exception { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -217,8 +216,7 @@ public void testBasicWorkFlow() throws Exception { restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2") - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -296,8 +294,7 @@ public void testFreshIndexUUID() { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") 
.setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -314,8 +311,7 @@ public void testFreshIndexUUID() { .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); String copyRestoreUUID = indicesAdmin().prepareGetSettings("test-copy") @@ -545,8 +541,7 @@ public void testDataFileCorruptionDuringRestore() throws Exception { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().failedShards(), @@ -750,8 +745,7 @@ public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Except .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); assertDocCount("test-idx", 100L); @@ -839,8 +833,7 @@ public void testDeleteSnapshot() throws Exception { .cluster() .prepareRestoreSnapshot("test-repo", lastSnapshot) .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 10L * numberOfSnapshots); @@ -911,8 +904,7 @@ public void testMoveShardWhileSnapshotting() throws Exception { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100L); } @@ -950,7 +942,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode); try { - client.admin().cluster().prepareDeleteRepository(randomFrom("test-repo", "test-*", "*")).execute().actionGet(); + client.admin().cluster().prepareDeleteRepository(randomFrom("test-repo", "test-*", "*")).get(); fail("shouldn't be able to delete in-use repository"); } catch (Exception ex) { logger.info("--> in-use repository deletion failed"); @@ -1002,8 +994,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { .cluster() .prepareRestoreSnapshot("test-repo", "test-snap") .setWaitForCompletion(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100); } @@ -1037,8 +1028,7 @@ public void testReadonlyRepository() throws Exception { .prepareRestoreSnapshot("readonly-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100L); @@ -1118,7 +1108,7 @@ public void testSnapshotStatus() throws Exception { }); logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode); - SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").execute().actionGet(); + SnapshotsStatusResponse 
response = client.admin().cluster().prepareSnapshotStatus("test-repo").get(); assertThat(response.getSnapshots().size(), equalTo(1)); SnapshotStatus snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getState(), equalTo(State.STARTED)); @@ -1133,7 +1123,7 @@ public void testSnapshotStatus() throws Exception { } logger.info("--> checking snapshot status for all currently running and snapshot with empty repository"); - response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet(); + response = client.admin().cluster().prepareSnapshotStatus().get(); assertThat(response.getSnapshots().size(), equalTo(1)); snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getState(), equalTo(State.STARTED)); @@ -1148,12 +1138,7 @@ public void testSnapshotStatus() throws Exception { } logger.info("--> checking that _current returns the currently running snapshot"); - GetSnapshotsResponse getResponse = client.admin() - .cluster() - .prepareGetSnapshots("test-repo") - .setCurrentSnapshot() - .execute() - .actionGet(); + GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().get(); assertThat(getResponse.getSnapshots().size(), equalTo(1)); SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS)); @@ -1170,7 +1155,7 @@ public void testSnapshotStatus() throws Exception { logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size()); logger.info("--> checking snapshot status again after snapshot is done"); - response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").execute().actionGet(); + response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").get(); snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getIndices().size(), equalTo(1)); assertThat(snapshotStatus.includeGlobalState(), equalTo(false)); @@ -1183,19 +1168,12 @@ public void testSnapshotStatus() throws Exception { assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards())); logger.info("--> checking snapshot status after it is done with empty repository"); - response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet(); + response = client.admin().cluster().prepareSnapshotStatus().get(); assertThat(response.getSnapshots().size(), equalTo(0)); logger.info("--> checking that _current no longer returns the snapshot"); assertThat( - client.admin() - .cluster() - .prepareGetSnapshots("test-repo") - .addSnapshots("_current") - .execute() - .actionGet() - .getSnapshots() - .isEmpty(), + client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").get().getSnapshots().isEmpty(), equalTo(true) ); @@ -1241,7 +1219,7 @@ public void testSnapshotRelocatingPrimary() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth("test-idx").execute().actionGet().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth("test-idx").get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); @@ -1532,9 +1510,9 @@ public void testListCorruptedSnapshot() throws Exception { logger.info("--> indexing some data"); indexRandom( true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar"), - 
client().prepareIndex("test-idx-3").setSource("foo", "bar") + prepareIndex("test-idx-1").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar"), + prepareIndex("test-idx-3").setSource("foo", "bar") ); createSnapshot("test-repo", "test-snap-1", Collections.singletonList("test-idx-*")); @@ -1575,9 +1553,9 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { createIndex("test-idx-1", "test-idx-2"); indexRandom( true, - client().prepareIndex("test-idx-1").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar"), - client().prepareIndex("test-idx-2").setSource("foo", "bar") + prepareIndex("test-idx-1").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar"), + prepareIndex("test-idx-2").setSource("foo", "bar") ); flushAndRefresh("test-idx-1", "test-idx-2"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java index bba6a2ae1a6b6..df2cf31e37470 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java @@ -38,8 +38,7 @@ public void testExceptionWhenRestoringPersistentSettings() { .cluster() .prepareUpdateSettings() .setPersistentSettings(Settings.builder().put(BrokenSettingPlugin.BROKEN_SETTING.getKey(), value)) - .execute() - .actionGet(); + .get(); Consumer assertSettingValue = value -> assertThat( client.admin() @@ -47,8 +46,7 @@ public void testExceptionWhenRestoringPersistentSettings() { .prepareState() .setRoutingTable(false) .setNodes(false) - .execute() - .actionGet() + .get() .getState() .getMetadata() .persistentSettings() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java index c307990b1a244..05888fd776641 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java @@ -157,8 +157,7 @@ public void testIncludeGlobalState() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state") .setWaitForCompletion(true) .setRestoreGlobalState(false) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template wasn't restored"); @@ -169,8 +168,7 @@ public void testIncludeGlobalState() throws Exception { restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state") .setWaitForCompletion(true) .setRestoreGlobalState(true) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); if (testTemplate) { @@ -226,8 +224,7 @@ public void testIncludeGlobalState() throws Exception { restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index") .setWaitForCompletion(true) .setRestoreGlobalState(false) - .execute() - .actionGet(); + .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); 
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index f8e2ed03a3e39..cc7c7709075c0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -138,10 +138,7 @@ public void testExceptionOnMissingSnapBlob() throws IOException { logger.info("--> delete snap-${uuid}.dat file for this snapshot to simulate concurrent delete"); IOUtils.rm(repoPath.resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat")); - expectThrows( - SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet() - ); + expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()); } public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { @@ -172,7 +169,7 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { expectThrows( SnapshotMissingException.class, - () -> clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").execute().actionGet() + () -> clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get() ); } @@ -459,11 +456,7 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { expectThrows( SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo") - .setSnapshots(notExistedSnapshotName) - .setIgnoreUnavailable(false) - .execute() - .actionGet() + () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(notExistedSnapshotName).setIgnoreUnavailable(false).get() ); logger.info("--> unblock all data nodes"); @@ -677,8 +670,9 @@ public void testConcurrentCreateAndStatusAPICalls() throws Exception { }, 60, TimeUnit.SECONDS); for (ActionFuture<SnapshotsStatusResponse> status : statuses) { - assertThat(status.get().getSnapshots(), hasSize(snapshots)); - for (SnapshotStatus snapshot : status.get().getSnapshots()) { + var statusResponse = status.get(); + assertThat(statusResponse.getSnapshots(), hasSize(snapshots)); + for (SnapshotStatus snapshot : statusResponse.getSnapshots()) { assertThat(snapshot.getState(), allOf(not(SnapshotsInProgress.State.FAILED), not(SnapshotsInProgress.State.ABORTED))); for (final var shard : snapshot.getShards()) { if (shard.getStage() == SnapshotIndexShardStage.DONE) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 4721b1a186a99..7eaa49b27007d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -55,6 +55,7 @@ import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; @@ -496,6 +497,11 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio final String[] indicesToRestore = indicesToRestoreList.toArray(new String[0]); final String[] indicesToClose = indicesToCloseList.toArray(new String[0]); final String[] 
indicesToDelete = indicesToDeleteList.toArray(new String[0]); + final String indicesToRestoreDescription = (restoreSpecificIndices ? "" : "*=") + Arrays.toString(indicesToRestore); + + if (restoreSpecificIndices == false) { + assertEquals(Set.copyOf(snapshotInfo.indices()), Set.of(indicesToRestore)); + } final ListenableFuture<CloseIndexResponse> closeIndicesStep = new ListenableFuture<>(); final ListenableFuture<AcknowledgedResponse> deleteIndicesStep = new ListenableFuture<>(); @@ -515,15 +521,17 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio ); logger.info( - "--> closing indices {} in preparation for restoring from [{}:{}]", - indicesToRestoreList, + "--> closing indices {} in preparation for restoring {} from [{}:{}]", + indicesToClose, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); indicesAdmin().prepareClose(indicesToClose).execute(mustSucceed(closeIndexResponse -> { logger.info( - "--> finished closing indices {} in preparation for restoring from [{}:{}]", - indicesToRestoreList, + "--> finished closing indices {} in preparation for restoring {} from [{}:{}]", + indicesToClose, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -538,15 +546,17 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio if (indicesToDelete.length > 0) { logger.info( - "--> deleting indices {} in preparation for restoring from [{}:{}]", - indicesToRestoreList, + "--> deleting indices {} in preparation for restoring {} from [{}:{}]", + indicesToDelete, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); indicesAdmin().prepareDelete(indicesToDelete).execute(mustSucceed(deleteIndicesResponse -> { logger.info( - "--> finished deleting indices {} in preparation for restoring from [{}:{}]", - indicesToRestoreList, + "--> finished deleting indices {} in preparation for restoring {} from [{}:{}]", + indicesToDelete, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -569,9 +579,8 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio } logger.info( - "--> restoring indices {}{} from [{}:{}]", - restoreSpecificIndices ? 
"" : "*=", - indicesToRestoreList, + "--> restoring indices {} from [{}:{}]", + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -579,7 +588,7 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio restoreSnapshotRequestBuilder.execute(mustSucceed(restoreSnapshotResponse -> { logger.info( "--> triggered restore of indices {} from [{}:{}], waiting for green health", - indicesToRestoreList, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -590,7 +599,7 @@ private void restoreSnapshot(SnapshotInfo snapshotInfo, Releasable releasePrevio logger.info( "--> indices {} successfully restored from [{}:{}]", - indicesToRestoreList, + indicesToRestoreDescription, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index d6dad537afaea..841f77ea7efab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -48,17 +48,16 @@ public void testThreadNames() throws Exception { int numDocs = randomIntBetween(2, 100); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; ++i) { - builders[i] = client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field("str_value", "s" + i) - .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) - .field("l_value", i) - .array("l_values", new int[] { i * 2, i * 2 + 1 }) - .field("d_value", i) - .array("d_values", new double[] { i * 2, i * 2 + 1 }) - .endObject() - ); + builders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("str_value", "s" + i) + .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) + .field("l_value", i) + .array("l_values", new int[] { i * 2, i * 2 + 1 }) + .field("d_value", i) + .array("d_values", new double[] { i * 2, i * 2 + 1 }) + .endObject() + ); } indexRandom(true, builders); int numSearches = randomIntBetween(2, 100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java index b3cb2e5f178ca..a62560588bdb2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java @@ -46,18 +46,16 @@ public void testDimensionFieldNameLimit() throws IOException { ); final Exception ex = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex("test") - .setSource( - "routing_field", - randomAlphaOfLength(10), - dimensionFieldName, - randomAlphaOfLength(1024), - "gauge", - randomIntBetween(10, 20), - "@timestamp", - Instant.now().toEpochMilli() - ) - .get() + () -> prepareIndex("test").setSource( + "routing_field", + randomAlphaOfLength(10), + dimensionFieldName, + randomAlphaOfLength(1024), + "gauge", + randomIntBetween(10, 20), + "@timestamp", + Instant.now().toEpochMilli() + ).get() ); assertThat( ex.getCause().getMessage(), @@ -76,14 +74,18 @@ public void testDimensionFieldValueLimit() throws IOException { 
dimensionFieldLimit ); long startTime = Instant.now().toEpochMilli(); - client().prepareIndex("test") - .setSource("field", randomAlphaOfLength(1024), "gauge", randomIntBetween(10, 20), "@timestamp", startTime) + prepareIndex("test").setSource("field", randomAlphaOfLength(1024), "gauge", randomIntBetween(10, 20), "@timestamp", startTime) .get(); final Exception ex = expectThrows( DocumentParsingException.class, - () -> client().prepareIndex("test") - .setSource("field", randomAlphaOfLength(1025), "gauge", randomIntBetween(10, 20), "@timestamp", startTime + 1) - .get() + () -> prepareIndex("test").setSource( + "field", + randomAlphaOfLength(1025), + "gauge", + randomIntBetween(10, 20), + "@timestamp", + startTime + 1 + ).get() ); assertThat(ex.getCause().getMessage(), equalTo("Dimension fields must be less than [1024] bytes but was [1025].")); } @@ -141,7 +143,7 @@ public void testTotalDimensionFieldsSizeLuceneLimit() throws IOException { for (int i = 0; i < dimensionFieldLimit; i++) { source.put(dimensionFieldNames.get(i), randomAlphaOfLength(1024)); } - final DocWriteResponse indexResponse = client().prepareIndex("test").setSource(source).get(); + final DocWriteResponse indexResponse = prepareIndex("test").setSource(source).get(); assertEquals(RestStatus.CREATED.getStatus(), indexResponse.status().getStatus()); } @@ -167,7 +169,7 @@ public void testTotalDimensionFieldsSizeLuceneLimitPlusOne() throws IOException for (int i = 0; i < dimensionFieldLimit; i++) { source.put(dimensionFieldNames.get(i), randomAlphaOfLength(1024)); } - final Exception ex = expectThrows(DocumentParsingException.class, () -> client().prepareIndex("test").setSource(source).get()); + final Exception ex = expectThrows(DocumentParsingException.class, () -> prepareIndex("test").setSource(source).get()); assertEquals("_tsid longer than [32766] bytes [33903].", ex.getCause().getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java index 4e97560284c67..813ff8b4227bc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java @@ -151,26 +151,24 @@ public void testUpsert() throws Exception { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) .setScript(fieldIncScript) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.CREATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1")); } updateResponse = client().prepareUpdate(indexOrAlias(), "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) .setScript(fieldIncScript) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), 
equalTo("2")); } } @@ -194,13 +192,12 @@ public void testScriptedUpsert() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject()) .setScriptedUpsert(true) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, UPSERT_SCRIPT, params)) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.CREATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("9")); } @@ -209,13 +206,12 @@ public void testScriptedUpsert() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject()) .setScriptedUpsert(true) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, UPSERT_SCRIPT, params)) - .execute() - .actionGet(); + .get(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("7")); } } @@ -228,8 +224,7 @@ public void testUpsertDoc() throws Exception { .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setDocAsUpsert(true) .setFetchSource(true) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); assertThat(updateResponse.getGetResult().getIndex(), equalTo("test")); @@ -259,8 +254,7 @@ public void testUpsertFields() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, PUT_VALUES_SCRIPT, Collections.singletonMap("extra", "foo"))) .setFetchSource(true) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); @@ -272,8 +266,7 @@ public void testUpsertFields() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, PUT_VALUES_SCRIPT, Collections.singletonMap("extra", "foo"))) .setFetchSource(true) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); @@ -287,8 +280,7 @@ public void testIndexAutoCreation() throws Exception { .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, PUT_VALUES_SCRIPT, Collections.singletonMap("extra", "foo"))) .setFetchSource(true) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); @@ -305,19 +297,19 @@ public void testUpdate() throws Exception { Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); DocumentMissingException ex = expectThrows( DocumentMissingException.class, - () -> 
client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).execute().actionGet() + () -> client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).get() ); assertEquals("[1]: document missing", ex.getMessage()); - client().prepareIndex("test").setId("1").setSource("field", 1).execute().actionGet(); + prepareIndex("test").setId("1").setSource("field", 1).get(); - UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).execute().actionGet(); + UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).get(); assertThat(updateResponse.getVersion(), equalTo(2L)); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2")); } @@ -326,14 +318,13 @@ public void testUpdate() throws Exception { params.put("field", "field"); updateResponse = client().prepareUpdate(indexOrAlias(), "1") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, params)) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getVersion(), equalTo(3L)); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5")); } @@ -347,14 +338,13 @@ public void testUpdate() throws Exception { Collections.singletonMap("_ctx", Collections.singletonMap("op", "none")) ) ) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getVersion(), equalTo(3L)); assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5")); } @@ -368,19 +358,18 @@ public void testUpdate() throws Exception { Collections.singletonMap("_ctx", Collections.singletonMap("op", "delete")) ) ) - .execute() - .actionGet(); + .get(); assertThat(updateResponse.getVersion(), equalTo(4L)); assertEquals(DocWriteResponse.Result.DELETED, updateResponse.getResult()); assertThat(updateResponse.getIndex(), equalTo("test")); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(false)); } // check _source parameter - client().prepareIndex("test").setId("1").setSource("field1", 1, "field2", 2).execute().actionGet(); + prepareIndex("test").setId("1").setSource("field1", 1, "field2", 2).get(); updateResponse = client().prepareUpdate(indexOrAlias(), "1") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field1"))) .setFetchSource("field1", "field2") @@ -395,24 +384,20 @@ public void testUpdate() throws Exception { // check updates without 
script // add new field - client().prepareIndex("test").setId("1").setSource("field", 1).execute().actionGet(); + prepareIndex("test").setId("1").setSource("field", 1).get(); client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject()) - .execute() - .actionGet(); + .get(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1")); assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2")); } // change existing field - client().prepareUpdate(indexOrAlias(), "1") - .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()) - .execute() - .actionGet(); + client().prepareUpdate(indexOrAlias(), "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()).get(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("3")); assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2")); } @@ -427,13 +412,12 @@ public void testUpdate() throws Exception { testMap.put("commonkey", testMap2); testMap.put("map1", 8); - client().prepareIndex("test").setId("1").setSource("map", testMap).execute().actionGet(); + prepareIndex("test").setId("1").setSource("map", testMap).get(); client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()) - .execute() - .actionGet(); + .get(); for (int i = 0; i < 5; i++) { - GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "1").get(); Map map1 = get(getResponse.getSourceAsMap(), "map"); assertThat(map1.size(), equalTo(3)); assertThat(map1.containsKey("map1"), equalTo(true)); @@ -451,7 +435,7 @@ public void testUpdateWithIfSeqNo() throws Exception { createTestIndex(); ensureGreen(); - DocWriteResponse result = client().prepareIndex("test").setId("1").setSource("field", 1).get(); + DocWriteResponse result = prepareIndex("test").setId("1").setSource("field", 1).get(); expectThrows( VersionConflictEngineException.class, () -> client().prepareUpdate(indexOrAlias(), "1") @@ -498,8 +482,7 @@ public void testUpdateRequestWithBothScriptAndDoc() throws Exception { client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject()) .setScript(fieldIncScript) - .execute() - .actionGet(); + .get(); fail("Should have thrown ActionRequestValidationException"); } catch (ActionRequestValidationException e) { assertThat(e.validationErrors().size(), equalTo(1)); @@ -513,7 +496,7 @@ public void testUpdateRequestWithScriptAndShouldUpsertDoc() throws Exception { ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); try { - client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).setDocAsUpsert(true).execute().actionGet(); + client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).setDocAsUpsert(true).get(); fail("Should have thrown ActionRequestValidationException"); } catch (ActionRequestValidationException e) { 
assertThat(e.validationErrors().size(), equalTo(1)); @@ -527,26 +510,18 @@ public void testContextVariables() throws Exception { ensureGreen(); // Index some documents - client().prepareIndex() - .setIndex("test") - .setId("id1") - .setRouting("routing1") - .setSource("field1", 1, "content", "foo") - .execute() - .actionGet(); - - client().prepareIndex().setIndex("test").setId("id2").setSource("field1", 0, "content", "bar").execute().actionGet(); + prepareIndex("test").setId("id1").setRouting("routing1").setSource("field1", 1, "content", "foo").get(); + prepareIndex("test").setId("id2").setSource("field1", 0, "content", "bar").get(); // Update the first object and note context variables values UpdateResponse updateResponse = client().prepareUpdate("test", "id1") .setRouting("routing1") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, EXTRACT_CTX_SCRIPT, Collections.emptyMap())) - .execute() - .actionGet(); + .get(); assertEquals(2, updateResponse.getVersion()); - GetResponse getResponse = client().prepareGet("test", "id1").setRouting("routing1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "id1").setRouting("routing1").get(); Map updateContext = get(getResponse.getSourceAsMap(), "update_context"); assertEquals("test", updateContext.get("_index")); assertEquals("id1", updateContext.get("_id")); @@ -556,12 +531,11 @@ public void testContextVariables() throws Exception { // Idem with the second object updateResponse = client().prepareUpdate("test", "id2") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, EXTRACT_CTX_SCRIPT, Collections.emptyMap())) - .execute() - .actionGet(); + .get(); assertEquals(2, updateResponse.getVersion()); - getResponse = client().prepareGet("test", "id2").execute().actionGet(); + getResponse = client().prepareGet("test", "id2").get(); updateContext = get(getResponse.getSourceAsMap(), "update_context"); assertEquals("test", updateContext.get("_index")); assertEquals("id2", updateContext.get("_id")); @@ -602,14 +576,13 @@ public void run() { .setScript(fieldIncScript) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()); - client().prepareBulk().add(updateRequestBuilder).execute().actionGet(); + client().prepareBulk().add(updateRequestBuilder).get(); } else { client().prepareUpdate(indexOrAlias(), Integer.toString(i)) .setScript(fieldIncScript) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) - .execute() - .actionGet(); + .get(); } } logger.info("Client [{}] issued all [{}] requests.", Thread.currentThread().getName(), numberOfUpdatesPerThread); @@ -640,7 +613,7 @@ public void run() { } assertThat(failures.size(), equalTo(0)); for (int i = 0; i < numberOfUpdatesPerThread; i++) { - GetResponse response = client().prepareGet("test", Integer.toString(i)).execute().actionGet(); + GetResponse response = client().prepareGet("test", Integer.toString(i)).get(); assertThat(response.getId(), equalTo(Integer.toString(i))); assertThat(response.isExists(), equalTo(true)); assertThat(response.getVersion(), equalTo((long) numberOfThreads)); @@ -849,15 +822,14 @@ private void waitForOutstandingRequests(TimeValue timeOut, Semaphore requestsOut .setScript(fieldIncScript) .setRetryOnConflict(Integer.MAX_VALUE) .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()) - .execute() - .actionGet(); + .get(); } refresh(); for (int i = 0; i < numberOfIdsPerThread; ++i) { int totalFailures = 0; - GetResponse response 
= client().prepareGet("test", Integer.toString(i)).execute().actionGet(); + GetResponse response = client().prepareGet("test", Integer.toString(i)).get(); if (response.isExists()) { assertThat(response.getId(), equalTo(Integer.toString(i))); int expectedVersion = (numberOfThreads * numberOfUpdatesPerId * 2) + 1; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index afb86bd175973..603bfbeaa3dfe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -65,56 +65,42 @@ public void testSimpleValidateQuery() throws Exception { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); refresh(); assertThat( indicesAdmin().prepareValidateQuery("test") .setQuery(QueryBuilders.wrapperQuery("foo".getBytes(StandardCharsets.UTF_8))) - .execute() - .actionGet() + .get() .isValid(), equalTo(false) ); assertThat( - indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_id:1")).execute().actionGet().isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_id:1")).get().isValid(), equalTo(true) ); assertThat( - indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_i:d:1")).execute().actionGet().isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_i:d:1")).get().isValid(), equalTo(false) ); assertThat( - indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("foo:1")).execute().actionGet().isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("foo:1")).get().isValid(), equalTo(true) ); assertThat( - indicesAdmin().prepareValidateQuery("test") - .setQuery(QueryBuilders.queryStringQuery("bar:hey").lenient(false)) - .execute() - .actionGet() - .isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("bar:hey").lenient(false)).get().isValid(), equalTo(false) ); assertThat( - indicesAdmin().prepareValidateQuery("test") - .setQuery(QueryBuilders.queryStringQuery("nonexistent:hello")) - .execute() - .actionGet() - .isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("nonexistent:hello")).get().isValid(), equalTo(true) ); assertThat( - indicesAdmin().prepareValidateQuery("test") - .setQuery(QueryBuilders.queryStringQuery("foo:1 AND")) - .execute() - .actionGet() - .isValid(), + indicesAdmin().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("foo:1 AND")).get().isValid(), equalTo(false) ); } @@ -149,15 +135,10 @@ public void testExplainValidateQueryTwoNodes() throws IOException { .endObject() .endObject() ) - .execute() - .actionGet(); + .get(); for (int i = 0; i < 10; i++) { - client().prepareIndex("test") - .setSource("foo", "text", "bar", i, "baz", "blort") - .setId(Integer.toString(i)) - .execute() - .actionGet(); + prepareIndex("test").setSource("foo", "text", "bar", i, "baz", "blort").setId(Integer.toString(i)).get(); } refresh(); @@ -167,8 +148,7 @@ public void testExplainValidateQueryTwoNodes() throws IOException { .prepareValidateQuery("test") .setQuery(QueryBuilders.wrapperQuery("foo".getBytes(StandardCharsets.UTF_8))) .setExplain(true) - .execute() - .actionGet(); + .get(); 
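
Most hunks in this diff are the same mechanical refactor: `.execute().actionGet()` collapses to `.get()`, which on ActionRequestBuilder is documented shorthand for exactly that chain, so behaviour is unchanged. A minimal before/after sketch (variable names here are illustrative only):

    // both forms block until the response arrives and rethrow failures unwrapped
    ValidateQueryResponse viaExecute = indicesAdmin().prepareValidateQuery("test")
        .setQuery(QueryBuilders.queryStringQuery("foo:1"))
        .execute()     // ActionFuture<ValidateQueryResponse>
        .actionGet();  // block on the future
    ValidateQueryResponse viaGet = indicesAdmin().prepareValidateQuery("test")
        .setQuery(QueryBuilders.queryStringQuery("foo:1"))
        .get();        // single call doing execute().actionGet()
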
assertThat(response.isValid(), equalTo(false)); assertThat(response.getQueryExplanation().size(), equalTo(1)); assertThat(response.getQueryExplanation().get(0).getError(), containsString("Failed to derive xcontent")); @@ -182,8 +162,7 @@ public void testExplainValidateQueryTwoNodes() throws IOException { .prepareValidateQuery("test") .setQuery(QueryBuilders.queryStringQuery("foo")) .setExplain(true) - .execute() - .actionGet(); + .get(); assertThat(response.isValid(), equalTo(true)); assertThat(response.getQueryExplanation().size(), equalTo(1)); assertThat( @@ -204,7 +183,7 @@ public void testExplainDateRangeInQueryString() { String aMonthAgo = DateTimeFormatter.ISO_LOCAL_DATE.format(now.plus(1, ChronoUnit.MONTHS)); String aMonthFromNow = DateTimeFormatter.ISO_LOCAL_DATE.format(now.minus(1, ChronoUnit.MONTHS)); - client().prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); + prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); @@ -265,10 +244,10 @@ public void testExplainWithRewriteValidateQuery() { .setMapping("field", "type=text,analyzer=whitespace") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1)) .get(); - client().prepareIndex("test").setId("1").setSource("field", "quick lazy huge brown pidgin").get(); - client().prepareIndex("test").setId("2").setSource("field", "the quick brown fox").get(); - client().prepareIndex("test").setId("3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); - client().prepareIndex("test").setId("4").setSource("field", "the lazy dog quacks like a duck").get(); + prepareIndex("test").setId("1").setSource("field", "quick lazy huge brown pidgin").get(); + prepareIndex("test").setId("2").setSource("field", "the quick brown fox").get(); + prepareIndex("test").setId("3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); + prepareIndex("test").setId("4").setSource("field", "the lazy dog quacks like a duck").get(); refresh(); // prefix queries @@ -311,10 +290,10 @@ public void testExplainWithRewriteValidateQueryAllShards() { .get(); // We are relying on specific routing behaviors for the result to be right, so // we cannot randomize the number of shards or change ids here. 
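    // (the bare prepareIndex(...) calls introduced below presumably resolve to a
    //  base-class helper along these lines, a sketch of the assumed ESIntegTestCase
    //  addition rather than the real source:
    //      protected static IndexRequestBuilder prepareIndex(String index) {
    //          return client().prepareIndex(index);
    //      }
    //  keeping the builder chain identical while trimming the client() noise)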
- client().prepareIndex("test").setId("1").setSource("field", "quick lazy huge brown pidgin").get(); - client().prepareIndex("test").setId("2").setSource("field", "the quick brown fox").get(); - client().prepareIndex("test").setId("3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); - client().prepareIndex("test").setId("4").setSource("field", "the lazy dog quacks like a duck").get(); + prepareIndex("test").setId("1").setSource("field", "quick lazy huge brown pidgin").get(); + prepareIndex("test").setId("2").setSource("field", "the quick brown fox").get(); + prepareIndex("test").setId("3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); + prepareIndex("test").setId("4").setSource("field", "the lazy dog quacks like a duck").get(); refresh(); // prefix queries @@ -357,8 +336,7 @@ private static void assertExplanation(QueryBuilder queryBuilder, Matcher .setQuery(queryBuilder) .setExplain(true) .setRewrite(withRewrite) - .execute() - .actionGet(); + .get(); assertThat(response.getQueryExplanation().size(), equalTo(1)); assertThat(response.getQueryExplanation().get(0).getError(), nullValue()); assertThat(response.getQueryExplanation().get(0).getExplanation(), matcher); @@ -376,8 +354,7 @@ private static void assertExplanations( .setExplain(true) .setRewrite(withRewrite) .setAllShards(allShards) - .execute() - .actionGet(); + .get(); assertThat(response.getQueryExplanation().size(), equalTo(matchers.size())); for (int i = 0; i < matchers.size(); i++) { assertThat(response.getQueryExplanation().get(i).getError(), nullValue()); @@ -391,15 +368,11 @@ public void testExplainTermsQueryWithLookup() { .setMapping("user", "type=integer", "followers", "type=integer") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)) .get(); - client().prepareIndex("twitter").setId("1").setSource("followers", new int[] { 1, 2, 3 }).get(); + prepareIndex("twitter").setId("1").setSource("followers", new int[] { 1, 2, 3 }).get(); refresh(); TermsQueryBuilder termsLookupQuery = QueryBuilders.termsLookupQuery("user", new TermsLookup("twitter", "1", "followers")); - ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("twitter") - .setQuery(termsLookupQuery) - .setExplain(true) - .execute() - .actionGet(); + ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("twitter").setQuery(termsLookupQuery).setExplain(true).get(); assertThat(response.isValid(), is(true)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java index b191eb0cf4fe3..69c10edf89809 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java @@ -31,7 +31,7 @@ public void testConcurrentOperationOnSameDoc() throws Exception { final AtomicReference failure = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(numberOfUpdates); for (int i = 0; i < numberOfUpdates; i++) { - client().prepareIndex("test").setId("1").setSource("field1", i).execute(new ActionListener<>() { + prepareIndex("test").setId("1").setSource("field1", i).execute(new ActionListener<>() { @Override public void onResponse(DocWriteResponse response) { latch.countDown(); @@ -50,12 +50,12 @@ public void 
onFailure(Exception e) { assertThat(failure.get(), nullValue()); - indicesAdmin().prepareRefresh().execute().actionGet(); + indicesAdmin().prepareRefresh().get(); logger.info("done indexing, check all have the same field value"); - Map masterSource = client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(); + Map masterSource = client().prepareGet("test", "1").get().getSourceAsMap(); for (int i = 0; i < (cluster().size() * 5); i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource)); + assertThat(client().prepareGet("test", "1").get().getSourceAsMap(), equalTo(masterSource)); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index a68d56e05cb48..2b804293cd506 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -7,20 +7,22 @@ */ package org.elasticsearch.versioning; +import org.apache.logging.log4j.Level; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.coordination.LinearizabilityChecker; import org.elasticsearch.cluster.coordination.LinearizabilityChecker.LinearizabilityCheckAborted; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.ChunkedLoggingStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.discovery.AbstractDisruptionTestCase; @@ -30,7 +32,9 @@ import java.io.FileInputStream; import java.io.IOException; -import java.util.ArrayList; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Base64; import java.util.List; @@ -132,7 +136,7 @@ public void testSeqNoCASLinearizability() { logger.info("--> Indexing initial doc for {} keys", numberOfKeys); List partitions = IntStream.range(0, numberOfKeys) - .mapToObj(i -> client().prepareIndex("test").setId("ID:" + i).setSource("value", -1).get()) + .mapToObj(i -> prepareIndex("test").setId("ID:" + i).setSource("value", -1).get()) .map(response -> new Partition(response.getId(), new Version(response.getPrimaryTerm(), response.getSeqNo()))) .toList(); @@ -429,25 +433,55 @@ public void assertLinearizable() { history ); LinearizabilityChecker.SequentialSpec spec = new CASSequentialSpec(initialVersion); - boolean linearizable = false; + Boolean linearizable = null; try { linearizable = LinearizabilityChecker.isLinearizable(spec, history, missingResponseGenerator()); } catch (LinearizabilityCheckAborted 
e) { - logger.warn("linearizability check check was aborted", e); + logger.warn("linearizability check was aborted, assuming linearizable", e); } finally { - // implicitly test that we can serialize all histories. - String serializedHistory = base64Serialize(history); - if (linearizable == false) { - // we dump base64 encoded data, since the nature of this test is that it does not reproduce even with same seed. - logger.error( - "Linearizability check failed. Spec: {}, initial version: {}, serialized history: {}", - spec, - initialVersion, - serializedHistory - ); + try { + if (Boolean.TRUE.equals(linearizable)) { + // ensure that we can serialize all histories. + writeHistory(new OutputStreamStreamOutput(OutputStream.nullOutputStream()), history); + } else { + final var outcome = linearizable == null ? "inconclusive" : "unlinearizable"; + + logger.error( + "Linearizability check did not succeed. Spec: {}, initial version: {}, outcome: {}", + spec, + initialVersion, + outcome + ); + // we dump base64 encoded data, since the nature of this test is that it does not reproduce even with same seed. + try ( + var chunkedLoggingStream = ChunkedLoggingStream.create( + logger, + Level.ERROR, + "raw " + outcome + " history in partition " + id, + ReferenceDocs.LOGGING // any old docs link will do + ); + var output = new OutputStreamStreamOutput(chunkedLoggingStream) + ) { + writeHistory(output, history); + } + try ( + var chunkedLoggingStream = ChunkedLoggingStream.create( + logger, + Level.ERROR, + "visualisation of " + outcome + " history in partition " + id, + ReferenceDocs.LOGGING // any old docs link will do + ); + var writer = new OutputStreamWriter(chunkedLoggingStream, StandardCharsets.UTF_8) + ) { + LinearizabilityChecker.writeVisualisation(spec, history, missingResponseGenerator(), writer); + } + assertNull("Must not be unlinearizable", linearizable); + } + } catch (IOException e) { + logger.error("failure writing out history", e); + fail(e); } } - assertTrue("Must be linearizable", linearizable); } } @@ -623,31 +657,15 @@ private static Function missingResponseGenerator() { return input -> new FailureHistoryOutput(); } - private String base64Serialize(LinearizabilityChecker.History history) { - BytesStreamOutput output = new BytesStreamOutput(); - try { - List events = history.copyEvents(); - output.writeInt(events.size()); - for (LinearizabilityChecker.Event event : events) { - writeEvent(event, output); - } - output.close(); - return Base64.getEncoder().encodeToString(BytesReference.toBytes(output.bytes())); - } catch (IOException e) { - throw new RuntimeException(e); - } + private static void writeHistory(StreamOutput output, LinearizabilityChecker.History history) throws IOException { + output.writeCollection(history.copyEvents(), ConcurrentSeqNoVersioningIT::writeEvent); } private static LinearizabilityChecker.History readHistory(StreamInput input) throws IOException { - int size = input.readInt(); - List events = new ArrayList<>(size); - for (int i = 0; i < size; ++i) { - events.add(readEvent(input)); - } - return new LinearizabilityChecker.History(events); + return new LinearizabilityChecker.History(input.readCollectionAsList(ConcurrentSeqNoVersioningIT::readEvent)); } - private static void writeEvent(LinearizabilityChecker.Event event, BytesStreamOutput output) throws IOException { + private static void writeEvent(StreamOutput output, LinearizabilityChecker.Event event) throws IOException { output.writeEnum(event.type()); output.writeNamedWriteable((NamedWriteable) event.value()); 
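    // each event is serialised as [enum type][named-writeable value][int id, written
    // below]; readEvent must consume the same three fields in the same order for the
    // writeCollection/readCollectionAsList round-trip above to stay symmetric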
output.writeInt(event.id()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 5a1c09098f21f..e7877dd862ded 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; @@ -36,6 +35,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -47,55 +47,41 @@ public void testExternalVersioningInitialDelete() throws Exception { // Note - external version doesn't throw version conflicts on deletes of non existent records. // This is different from internal versioning - DeleteResponse deleteResponse = client().prepareDelete("test", "1") - .setVersion(17) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).get(); assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult()); // this should conflict with the delete command transaction which told us that the object was deleted at version 17. 
assertFutureThrows( - client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1_1") - .setVersion(13) - .setVersionType(VersionType.EXTERNAL) - .execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(), VersionConflictEngineException.class ); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(18) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(18L)); } public void testExternalGTE() throws Exception { createIndex("test"); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(12) .setVersionType(VersionType.EXTERNAL_GTE) .get(); assertThat(indexResponse.getVersion(), equalTo(12L)); - indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_2") .setVersion(12) .setVersionType(VersionType.EXTERNAL_GTE) .get(); assertThat(indexResponse.getVersion(), equalTo(12L)); - indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_2") .setVersion(14) .setVersionType(VersionType.EXTERNAL_GTE) @@ -103,15 +89,11 @@ public void testExternalGTE() throws Exception { assertThat(indexResponse.getVersion(), equalTo(14L)); assertRequestBuilderThrows( - client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1_1") - .setVersion(13) - .setVersionType(VersionType.EXTERNAL_GTE), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL_GTE), VersionConflictEngineException.class ); - client().admin().indices().prepareRefresh().execute().actionGet(); + client().admin().indices().prepareRefresh().get(); if (randomBoolean()) { refresh(); } @@ -127,11 +109,7 @@ public void testExternalGTE() throws Exception { // Delete with a higher or equal version deletes all versions up to the given one. long v = randomIntBetween(14, 17); - DeleteResponse deleteResponse = client().prepareDelete("test", "1") - .setVersion(v) - .setVersionType(VersionType.EXTERNAL_GTE) - .execute() - .actionGet(); + DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(v).setVersionType(VersionType.EXTERNAL_GTE).get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(v)); @@ -142,7 +120,7 @@ public void testExternalGTE() throws Exception { ); // But delete with a higher version is OK. 
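    // (NOT_FOUND below does not mean the delete was a no-op: the engine still
    //  records the delete at version 18, which is why the response reports that
    //  version and why later writes at lower external versions keep conflicting)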
- deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL_GTE).execute().actionGet(); + deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL_GTE).get(); assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(18L)); } @@ -151,31 +129,22 @@ public void testExternalVersioning() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") + DocWriteResponse indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(12) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(12L)); - indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(14) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(14L)); assertFutureThrows( - client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1_1") - .setVersion(13) - .setVersionType(VersionType.EXTERNAL) - .execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(), VersionConflictEngineException.class ); @@ -183,7 +152,7 @@ public void testExternalVersioning() throws Exception { refresh(); } for (int i = 0; i < 10; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().getVersion(), equalTo(14L)); + assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(14L)); } // deleting with a lower version fails. @@ -193,11 +162,7 @@ public void testExternalVersioning() throws Exception { ); // Delete with a higher version deletes all versions up to the given one. - DeleteResponse deleteResponse = client().prepareDelete("test", "1") - .setVersion(17) - .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + DeleteResponse deleteResponse = client().prepareDelete("test", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(17L)); @@ -208,22 +173,20 @@ public void testExternalVersioning() throws Exception { ); // But delete with a higher version is OK. - deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + deleteResponse = client().prepareDelete("test", "1").setVersion(18).setVersionType(VersionType.EXTERNAL).get(); assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(18L)); // TODO: This behavior breaks rest api returning http status 201 // good news is that it this is only the case until deletes GC kicks in. 
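
The sequence below leans on two external-versioning rules worth spelling out: a write wins only if its version is strictly greater than whatever is stored, and "stored" includes delete tombstones until index.gc_deletes expires them. In outline (an illustrative summary of the steps that follow, not code from the PR):

    prepareIndex("test").setId("1").setSource("field1", "value1_1")
        .setVersion(19).setVersionType(VersionType.EXTERNAL).get();   // doc stored at 19
    client().prepareDelete("test", "1")
        .setVersion(20).setVersionType(VersionType.EXTERNAL).get();   // tombstone at 20
    // re-indexing at version 20 conflicts while the tombstone is alive; once GC has
    // pruned it there is no stored version left to compare against, so the same
    // write is accepted, which is what the Thread.sleep(300) below gives time for
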
- indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(19) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(19L)); - deleteResponse = client().prepareDelete("test", "1").setVersion(20).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + deleteResponse = client().prepareDelete("test", "1").setVersion(20).setVersionType(VersionType.EXTERNAL).get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); assertThat(deleteResponse.getVersion(), equalTo(20L)); @@ -232,13 +195,11 @@ public void testExternalVersioning() throws Exception { Thread.sleep(300); // gc works based on estimated sampled time. Give it a chance... // And now we have previous version return -1 - indexResponse = client().prepareIndex("test") - .setId("1") + indexResponse = prepareIndex("test").setId("1") .setSource("field1", "value1_1") .setVersion(20) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(indexResponse.getVersion(), equalTo(20L)); } @@ -248,7 +209,7 @@ public void testRequireUnitsOnUpdateSettings() throws Exception { HashMap newSettings = new HashMap<>(); newSettings.put("index.gc_deletes", "42"); try { - client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).get(); fail("did not hit expected exception"); } catch (IllegalArgumentException iae) { // expected @@ -268,12 +229,7 @@ public void testCompareAndSetInitialDelete() throws Exception { VersionConflictEngineException.class ); - DocWriteResponse indexResponse = client().prepareIndex("test") - .setId("1") - .setSource("field1", "value1_1") - .setCreate(true) - .execute() - .actionGet(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").setCreate(true).get(); assertThat(indexResponse.getVersion(), equalTo(1L)); } @@ -281,26 +237,26 @@ public void testCompareAndSet() { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_1").execute().actionGet(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); assertThat(indexResponse.getSeqNo(), equalTo(0L)); assertThat(indexResponse.getPrimaryTerm(), equalTo(1L)); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0L).setIfPrimaryTerm(1).get(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0L).setIfPrimaryTerm(1).get(); assertThat(indexResponse.getSeqNo(), equalTo(1L)); assertThat(indexResponse.getPrimaryTerm(), equalTo(1L)); assertFutureThrows( - client().prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(1).execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(1).execute(), VersionConflictEngineException.class ); assertFutureThrows( - client().prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(2).execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(10).setIfPrimaryTerm(2).execute(), VersionConflictEngineException.class ); assertFutureThrows( - 
client().prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(1).setIfPrimaryTerm(2).execute(), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(1).setIfPrimaryTerm(2).execute(), VersionConflictEngineException.class ); @@ -317,7 +273,7 @@ public void testCompareAndSet() { VersionConflictEngineException.class ); - client().admin().indices().prepareRefresh().execute().actionGet(); + client().admin().indices().prepareRefresh().get(); for (int i = 0; i < 10; i++) { final GetResponse response = client().prepareGet("test", "1").get(); assertThat(response.getSeqNo(), equalTo(1L)); @@ -327,14 +283,18 @@ public void testCompareAndSet() { // search with versioning for (int i = 0; i < 10; i++) { // TODO: ADD SEQ NO! - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet(); - assertThat(searchResponse.getHits().getAt(0).getVersion(), equalTo(2L)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()).setVersion(true), + response -> assertThat(response.getHits().getAt(0).getVersion(), equalTo(2L)) + ); } // search without versioning for (int i = 0; i < 10; i++) { - SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()).execute().actionGet(); - assertThat(searchResponse.getHits().getAt(0).getVersion(), equalTo(Versions.NOT_FOUND)); + assertResponse( + prepareSearch().setQuery(matchAllQuery()), + response -> assertThat(response.getHits().getAt(0).getVersion(), equalTo(Versions.NOT_FOUND)) + ); } DeleteResponse deleteResponse = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(1).get(); @@ -366,21 +326,21 @@ public void testSimpleVersioningWithFlush() throws Exception { createIndex("test"); ensureGreen(); - DocWriteResponse indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); + DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_1").get(); assertThat(indexResponse.getSeqNo(), equalTo(0L)); - client().admin().indices().prepareFlush().execute().actionGet(); - indexResponse = client().prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0).setIfPrimaryTerm(1).get(); + client().admin().indices().prepareFlush().get(); + indexResponse = prepareIndex("test").setId("1").setSource("field1", "value1_2").setIfSeqNo(0).setIfPrimaryTerm(1).get(); assertThat(indexResponse.getSeqNo(), equalTo(1L)); - client().admin().indices().prepareFlush().execute().actionGet(); + client().admin().indices().prepareFlush().get(); assertRequestBuilderThrows( - client().prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(0).setIfPrimaryTerm(1), + prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(0).setIfPrimaryTerm(1), VersionConflictEngineException.class ); assertRequestBuilderThrows( - client().prepareIndex("test").setId("1").setCreate(true).setSource("field1", "value1_1"), + prepareIndex("test").setId("1").setCreate(true).setSource("field1", "value1_1"), VersionConflictEngineException.class ); @@ -390,20 +350,17 @@ public void testSimpleVersioningWithFlush() throws Exception { ); for (int i = 0; i < 10; i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().getVersion(), equalTo(2L)); + assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(2L)); } - client().admin().indices().prepareRefresh().execute().actionGet(); + client().admin().indices().prepareRefresh().get(); for (int i = 0; i < 10; i++) { 
- SearchResponse searchResponse = prepareSearch().setQuery(matchAllQuery()) - .setVersion(true) - .seqNoAndPrimaryTerm(true) - .execute() - .actionGet(); - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getAt(0).getVersion(), equalTo(2L)); - assertThat(searchResponse.getHits().getAt(0).getSeqNo(), equalTo(1L)); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setVersion(true).seqNoAndPrimaryTerm(true), response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getVersion(), equalTo(2L)); + assertThat(response.getHits().getAt(0).getSeqNo(), equalTo(1L)); + }); } } @@ -411,10 +368,7 @@ public void testVersioningWithBulk() { createIndex("test"); ensureGreen(); - BulkResponse bulkResponse = client().prepareBulk() - .add(client().prepareIndex("test").setId("1").setSource("field1", "value1_1")) - .execute() - .actionGet(); + BulkResponse bulkResponse = client().prepareBulk().add(prepareIndex("test").setId("1").setSource("field1", "value1_1")).get(); assertThat(bulkResponse.hasFailures(), equalTo(false)); assertThat(bulkResponse.getItems().length, equalTo(1)); IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse(); @@ -686,8 +640,7 @@ public void run() { idVersion.response = client().prepareDelete("test", id) .setVersion(version) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); } catch (VersionConflictEngineException vcee) { // OK: our version is too old assertThat(version, lessThanOrEqualTo(truth.get(id).version)); @@ -695,8 +648,7 @@ public void run() { } } else { try { - idVersion.response = client().prepareIndex("test") - .setId(id) + idVersion.response = prepareIndex("test").setId(id) .setSource("foo", "bar") .setVersion(version) .setVersionType(VersionType.EXTERNAL) @@ -744,7 +696,7 @@ public void run() { } else { expected = -1; } - long actualVersion = client().prepareGet("test", id).execute().actionGet().getVersion(); + long actualVersion = client().prepareGet("test", id).get().getVersion(); if (actualVersion != expected) { logger.error("--> FAILED: idVersion={} actualVersion= {}", idVersion, actualVersion); failed = true; @@ -764,21 +716,19 @@ public void run() { public void testDeleteNotLost() throws Exception { // We require only one shard for this test, so that the 2nd delete provokes pruning the deletes map: - indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1)).execute().actionGet(); + indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1)).get(); ensureGreen(); updateIndexSettings(Settings.builder().put("index.gc_deletes", "10ms").put("index.refresh_interval", "-1"), "test"); // Index a doc: - client().prepareIndex("test") - .setId("id") + prepareIndex("test").setId("id") .setSource("foo", "bar") .setOpType(DocWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); if (randomBoolean()) { // Force refresh so the add is sometimes visible in the searcher: @@ -786,20 +736,20 @@ public void testDeleteNotLost() throws Exception { } // Delete it - client().prepareDelete("test", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + client().prepareDelete("test", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).get(); // Real-time get should reflect delete: - assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); + 
assertThat("doc should have been deleted", client().prepareGet("test", "id").get().getVersion(), equalTo(-1L)); // ThreadPool.relativeTimeInMillis has default granularity of 200 msec, so we must sleep at least that long; sleep much longer in // case system is busy: Thread.sleep(1000); // Delete an unrelated doc (provokes pruning deletes from versionMap) - client().prepareDelete("test", "id2").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + client().prepareDelete("test", "id2").setVersion(11).setVersionType(VersionType.EXTERNAL).get(); // Real-time get should still reflect delete: - assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); + assertThat("doc should have been deleted", client().prepareGet("test", "id").get().getVersion(), equalTo(-1L)); } public void testGCDeletesZero() throws Exception { @@ -809,14 +759,12 @@ public void testGCDeletesZero() throws Exception { // We test deletes, but can't rely on wall-clock delete GC: updateIndexSettings(Settings.builder().put("index.gc_deletes", "0ms"), "test"); // Index a doc: - client().prepareIndex("test") - .setId("id") + prepareIndex("test").setId("id") .setSource("foo", "bar") .setOpType(DocWriteRequest.OpType.INDEX) .setVersion(10) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); if (randomBoolean()) { // Force refresh so the add is sometimes visible in the searcher: @@ -824,47 +772,39 @@ public void testGCDeletesZero() throws Exception { } // Delete it - client().prepareDelete("test", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + client().prepareDelete("test", "id").setVersion(11).setVersionType(VersionType.EXTERNAL).get(); // Real-time get should reflect delete even though index.gc_deletes is 0: - assertThat("doc should have been deleted", client().prepareGet("test", "id").execute().actionGet().getVersion(), equalTo(-1L)); + assertThat("doc should have been deleted", client().prepareGet("test", "id").get().getVersion(), equalTo(-1L)); } public void testSpecialVersioning() { internalCluster().ensureAtLeastNumDataNodes(2); createIndex("test", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); - DocWriteResponse doc1 = client().prepareIndex("test") - .setId("1") + DocWriteResponse doc1 = prepareIndex("test").setId("1") .setSource("field", "value1") .setVersion(0) .setVersionType(VersionType.EXTERNAL) - .execute() - .actionGet(); + .get(); assertThat(doc1.getVersion(), equalTo(0L)); - DocWriteResponse doc2 = client().prepareIndex("test") - .setId("1") + DocWriteResponse doc2 = prepareIndex("test").setId("1") .setSource("field", "value2") .setVersion(Versions.MATCH_ANY) .setVersionType(VersionType.INTERNAL) - .execute() - .actionGet(); + .get(); assertThat(doc2.getVersion(), equalTo(1L)); client().prepareDelete("test", "1").get(); // v2 - DocWriteResponse doc3 = client().prepareIndex("test") - .setId("1") + DocWriteResponse doc3 = prepareIndex("test").setId("1") .setSource("field", "value3") .setVersion(Versions.MATCH_DELETED) .setVersionType(VersionType.INTERNAL) - .execute() - .actionGet(); + .get(); assertThat(doc3.getVersion(), equalTo(3L)); - DocWriteResponse doc4 = client().prepareIndex("test") - .setId("1") + DocWriteResponse doc4 = prepareIndex("test").setId("1") .setSource("field", "value4") .setVersion(4L) .setVersionType(VersionType.EXTERNAL_GTE) - .execute() - .actionGet(); + .get(); assertThat(doc4.getVersion(), equalTo(4L)); // Make 
sure that these versions are replicated correctly setReplicaCount(1, "test"); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 98dd182900f88..613e6868b8e9f 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -407,8 +407,10 @@ with org.elasticsearch.features.FeatureInfrastructureFeatures, org.elasticsearch.health.HealthFeatures, + org.elasticsearch.cluster.service.TransportFeatures, org.elasticsearch.cluster.metadata.MetadataFeatures, - org.elasticsearch.rest.RestFeatures; + org.elasticsearch.rest.RestFeatures, + org.elasticsearch.indices.IndicesFeatures; uses org.elasticsearch.plugins.internal.SettingsExtension; uses RestExtension; @@ -420,5 +422,9 @@ org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; provides org.apache.lucene.codecs.DocValuesFormat with ES87TSDBDocValuesFormat; - exports org.elasticsearch.cluster.routing.allocation.shards to org.elasticsearch.shardhealth, org.elasticsearch.serverless.shardhealth; + exports org.elasticsearch.cluster.routing.allocation.shards + to + org.elasticsearch.shardhealth, + org.elasticsearch.serverless.shardhealth, + org.elasticsearch.serverless.apifiltering; } diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 4bbfe994f7f6d..5c5133e478ee1 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1131,12 +1131,7 @@ private enum ElasticsearchExceptionHandle { UNKNOWN_VERSION_ADDED ), // 26 was BatchOperationException - SNAPSHOT_CREATION_EXCEPTION( - org.elasticsearch.snapshots.SnapshotCreationException.class, - org.elasticsearch.snapshots.SnapshotCreationException::new, - 27, - UNKNOWN_VERSION_ADDED - ), + // 27 was SnapshotCreationException // 28 was DeleteFailedEngineException, deprecated in 6.0, removed in 7.0 DOCUMENT_MISSING_EXCEPTION( org.elasticsearch.index.engine.DocumentMissingException.class, diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index d625da5df9cc7..b67b59aeee076 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -264,8 +264,13 @@ public static void maybeDieOnAnotherThread(final Throwable throwable) { /** * Deduplicate the failures by exception message and index. 
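The guard added just below makes the deduplication contract explicit: a null or empty input is handed back untouched, and anything else is collapsed to one failure per (message, index) pair. A minimal sketch of that contract, using a hypothetical Failure record in place of the real ShardOperationFailedException:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Illustrative stand-in for ShardOperationFailedException.
    record Failure(String index, String reason) {}

    class GroupBySketch {
        static Failure[] groupBy(Failure[] failures) {
            if (failures == null || failures.length == 0) {
                return failures; // returned without modification, per the new javadoc
            }
            List<Failure> unique = new ArrayList<>();
            Set<String> seen = new HashSet<>();
            for (Failure failure : failures) {
                // keep the first failure seen for each (reason, index) pair
                if (seen.add(failure.reason() + " @ " + failure.index())) {
                    unique.add(failure);
                }
            }
            return unique.toArray(Failure[]::new);
        }
    }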
+ * @param failures array to deduplicate + * @return deduplicated array; if failures is null or empty, it will be returned without modification */ public static ShardOperationFailedException[] groupBy(ShardOperationFailedException[] failures) { + if (failures == null || failures.length == 0) { + return failures; + } List uniqueFailures = new ArrayList<>(); Set reasons = new HashSet<>(); for (ShardOperationFailedException failure : failures) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 0c7145730e447..44f98305d2997 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -9,6 +9,7 @@ package org.elasticsearch; import org.elasticsearch.core.Assertions; +import org.elasticsearch.core.UpdateForV9; import java.lang.reflect.Field; import java.util.Collection; @@ -47,6 +48,7 @@ static TransportVersion def(int id) { return new TransportVersion(id); } + @UpdateForV9 // remove the transport versions with which v9 will not need to interact public static final TransportVersion ZERO = def(0); public static final TransportVersion V_7_0_0 = def(7_00_00_99); public static final TransportVersion V_7_0_1 = def(7_00_01_99); @@ -161,7 +163,24 @@ static TransportVersion def(int id) { public static final TransportVersion UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED = def(8_530_00_0); public static final TransportVersion ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED = def(8_531_00_0); public static final TransportVersion DEPRECATED_COMPONENT_TEMPLATES_ADDED = def(8_532_00_0); - + public static final TransportVersion UPDATE_NON_DYNAMIC_SETTINGS_ADDED = def(8_533_00_0); + public static final TransportVersion REPO_ANALYSIS_REGISTER_OP_COUNT_ADDED = def(8_534_00_0); + public static final TransportVersion ML_TRAINED_MODEL_PREFIX_STRINGS_ADDED = def(8_535_00_0); + public static final TransportVersion COUNTED_KEYWORD_ADDED = def(8_536_00_0); + public static final TransportVersion SHAPE_VALUE_SERIALIZATION_ADDED = def(8_537_00_0); + public static final TransportVersion INFERENCE_MULTIPLE_INPUTS = def(8_538_00_0); + public static final TransportVersion ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS = def(8_539_00_0); + public static final TransportVersion ML_STATE_CHANGE_TIMESTAMPS = def(8_540_00_0); + public static final TransportVersion DATA_STREAM_FAILURE_STORE_ADDED = def(8_541_00_0); + public static final TransportVersion ML_INFERENCE_OPENAI_ADDED = def(8_542_00_0); + public static final TransportVersion SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS = def(8_543_00_0); + public static final TransportVersion TRANSFORM_GET_CHECKPOINT_QUERY_AND_CLUSTER_ADDED = def(8_544_00_0); + public static final TransportVersion GRANT_API_KEY_CLIENT_AUTHENTICATION_ADDED = def(8_545_00_0); + public static final TransportVersion PIT_WITH_INDEX_FILTER = def(8_546_00_0); + public static final TransportVersion NODE_INFO_VERSION_AS_STRING = def(8_547_00_0); + public static final TransportVersion GET_API_KEY_INVALIDATION_TIME_ADDED = def(8_548_00_0); + public static final TransportVersion ML_INFERENCE_GET_MULTIPLE_MODELS = def(8_549_00_0); + public static final TransportVersion INFERENCE_SERVICE_RESULTS_ADDED = def(8_550_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ @@ -202,15 +221,17 @@ static TransportVersion def(int id) { * If your git checkout has the expected minor-version-numbered branches and the expected release-version tags then you can find the * transport versions known by a particular release ... * - * git show v8.9.1:server/src/main/java/org/elasticsearch/TransportVersions.java | grep def + * git show v8.11.0:server/src/main/java/org/elasticsearch/TransportVersions.java | grep '= def' * * ... or by a particular branch ... * - * git show 8.10:server/src/main/java/org/elasticsearch/TransportVersions.java | grep def + * git show 8.11:server/src/main/java/org/elasticsearch/TransportVersions.java | grep '= def' * * ... and you can see which versions were added in between two versions too ... * - * git diff 8.10..main -- server/src/main/java/org/elasticsearch/TransportVersions.java + * git diff v8.11.0..main -- server/src/main/java/org/elasticsearch/TransportVersions.java + * + * In branches 8.7-8.10 see server/src/main/java/org/elasticsearch/TransportVersion.java for the equivalent definitions. */ /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 56a00e25022d4..5dd9a3a055043 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -115,6 +115,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_13 = new Version(7_17_13_99); public static final Version V_7_17_14 = new Version(7_17_14_99); public static final Version V_7_17_15 = new Version(7_17_15_99); + public static final Version V_7_17_16 = new Version(7_17_16_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); public static final Version V_8_1_0 = new Version(8_01_00_99); @@ -155,6 +156,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_10_4 = new Version(8_10_04_99); public static final Version V_8_11_0 = new Version(8_11_00_99); public static final Version V_8_11_1 = new Version(8_11_01_99); + public static final Version V_8_11_2 = new Version(8_11_02_99); public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version CURRENT = V_8_12_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 30ad4fdeaf04f..b0e18d5ef9b55 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -327,7 +327,12 @@ private void assertFirstRun() { @Override public void onResponse(Response response) { assertFirstRun(); - delegate.onResponse(response); + try { + delegate.onResponse(response); + } catch (Exception e) { + assert false : new AssertionError("listener [" + delegate + "] must handle its own exceptions", e); + throw e; + } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index a855b6b8ee7e3..e0f01405bcf0f 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -202,11 +202,12 @@ import 
org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.SimulateBulkAction; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.bulk.TransportSimulateBulkAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.TransportDeleteAction; -import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.TransportExplainAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; @@ -225,15 +226,8 @@ import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; import org.elasticsearch.action.ingest.SimulatePipelineTransportAction; -import org.elasticsearch.action.search.ClearScrollAction; -import org.elasticsearch.action.search.ClosePointInTimeAction; -import org.elasticsearch.action.search.MultiSearchAction; -import org.elasticsearch.action.search.OpenPointInTimeAction; import org.elasticsearch.action.search.RestClosePointInTimeAction; import org.elasticsearch.action.search.RestOpenPointInTimeAction; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchScrollAction; -import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportMultiSearchAction; @@ -439,6 +433,7 @@ import org.elasticsearch.rest.action.ingest.RestDeletePipelineAction; import org.elasticsearch.rest.action.ingest.RestGetPipelineAction; import org.elasticsearch.rest.action.ingest.RestPutPipelineAction; +import org.elasticsearch.rest.action.ingest.RestSimulateIngestAction; import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction; import org.elasticsearch.rest.action.search.RestClearScrollAction; import org.elasticsearch.rest.action.search.RestCountAction; @@ -758,15 +753,16 @@ public void reg actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class); actions.register(TransportShardMultiGetAction.TYPE, TransportShardMultiGetAction.class); actions.register(BulkAction.INSTANCE, TransportBulkAction.class); + actions.register(SimulateBulkAction.INSTANCE, TransportSimulateBulkAction.class); actions.register(TransportShardBulkAction.TYPE, TransportShardBulkAction.class); - actions.register(SearchAction.INSTANCE, TransportSearchAction.class); - actions.register(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); - actions.register(OpenPointInTimeAction.INSTANCE, TransportOpenPointInTimeAction.class); - actions.register(ClosePointInTimeAction.INSTANCE, TransportClosePointInTimeAction.class); - actions.register(SearchShardsAction.INSTANCE, TransportSearchShardsAction.class); - actions.register(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); - actions.register(ExplainAction.INSTANCE, TransportExplainAction.class); - actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); + actions.register(TransportSearchAction.TYPE, TransportSearchAction.class); + actions.register(TransportSearchScrollAction.TYPE, 
TransportSearchScrollAction.class); + actions.register(TransportOpenPointInTimeAction.TYPE, TransportOpenPointInTimeAction.class); + actions.register(TransportClosePointInTimeAction.TYPE, TransportClosePointInTimeAction.class); + actions.register(TransportSearchShardsAction.TYPE, TransportSearchShardsAction.class); + actions.register(TransportMultiSearchAction.TYPE, TransportMultiSearchAction.class); + actions.register(TransportExplainAction.TYPE, TransportExplainAction.class); + actions.register(TransportClearScrollAction.TYPE, TransportClearScrollAction.class); actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); actions.register(NodesReloadSecureSettingsAction.INSTANCE, TransportNodesReloadSecureSettingsAction.class); actions.register(AutoCreateAction.INSTANCE, AutoCreateAction.TransportAction.class); @@ -944,6 +940,7 @@ public void initRestHandlers(Supplier nodesInCluster, Predicate< registerHandler.accept(new RestGetComposableIndexTemplateAction()); registerHandler.accept(new RestDeleteComposableIndexTemplateAction()); registerHandler.accept(new RestSimulateIndexTemplateAction()); + registerHandler.accept(new RestSimulateIngestAction()); registerHandler.accept(new RestSimulateTemplateAction()); registerHandler.accept(new RestPutMappingAction()); diff --git a/server/src/main/java/org/elasticsearch/action/ActionRunnable.java b/server/src/main/java/org/elasticsearch/action/ActionRunnable.java index 3c59e3d66d4db..7feabf7e0241f 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRunnable.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRunnable.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; /** @@ -58,6 +59,32 @@ public String toString() { }); } + /** + * Same as {@link #supply(ActionListener, CheckedSupplier)} but the supplier always returns an object of reference counted result type + * which will have its reference count decremented after invoking the listener. + */ + public static ActionRunnable supplyAndDecRef( + ActionListener listener, + CheckedSupplier supplier + ) { + return wrap(listener, new CheckedConsumer<>() { + @Override + public void accept(ActionListener l) throws Exception { + var res = supplier.get(); + try { + l.onResponse(res); + } finally { + res.decRef(); + } + } + + @Override + public String toString() { + return supplier.toString(); + } + }); + } + /** * Creates a {@link Runnable} that wraps the given listener and a consumer of it that is executed when the {@link Runnable} is run. * Invokes {@link ActionListener#onFailure(Exception)} on it if an exception is thrown on executing the consumer. 
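The important detail in the new supplyAndDecRef helper above is the try/finally ordering: the listener receives the ref-counted result first, and the supplier's reference is released afterwards whether or not the listener throws. A hand-written equivalent of one call, where computeResponse and listener are placeholder names for the supplier result and the wrapped listener:

    // resp carries the supplier's reference (refcount >= 1) when it is handed over.
    var resp = computeResponse();
    try {
        // a listener that needs resp beyond this call must incRef it itself
        listener.onResponse(resp);
    } finally {
        resp.decRef(); // always give back the supplier's reference
    }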
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index ef04198c7374b..e3373ded94dc7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -36,7 +36,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; import java.util.Locale; +import java.util.function.Consumer; import static java.lang.String.format; @@ -44,7 +46,7 @@ public class TransportUpdateDesiredNodesAction extends TransportMasterNodeAction private static final Logger logger = LogManager.getLogger(TransportUpdateDesiredNodesAction.class); private final FeatureService featureService; - private final DesiredNodesSettingsValidator settingsValidator; + private final Consumer> desiredNodesValidator; private final MasterServiceTaskQueue taskQueue; @Inject @@ -55,7 +57,28 @@ public TransportUpdateDesiredNodesAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - DesiredNodesSettingsValidator settingsValidator, + AllocationService allocationService + ) { + this( + transportService, + clusterService, + featureService, + threadPool, + actionFilters, + indexNameExpressionResolver, + new DesiredNodesSettingsValidator(), + allocationService + ); + } + + TransportUpdateDesiredNodesAction( + TransportService transportService, + ClusterService clusterService, + FeatureService featureService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + Consumer> desiredNodesValidator, AllocationService allocationService ) { super( @@ -71,7 +94,7 @@ public TransportUpdateDesiredNodesAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.featureService = featureService; - this.settingsValidator = settingsValidator; + this.desiredNodesValidator = desiredNodesValidator; this.taskQueue = clusterService.createTaskQueue( "update-desired-nodes", Priority.URGENT, @@ -92,7 +115,7 @@ protected void masterOperation( ActionListener responseListener ) throws Exception { ActionListener.run(responseListener, listener -> { - settingsValidator.validate(request.getNodes()); + desiredNodesValidator.accept(request.getNodes()); taskQueue.submitTask("update-desired-nodes", new UpdateDesiredNodesTask(request, listener), request.masterNodeTimeout()); }); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index af8637cf1febc..63c2be9050ab0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import 
org.elasticsearch.indices.SystemIndices; @@ -54,6 +55,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA /** * Once all feature migrations for 8.x -> 9.x have been tested, we can bump this to Version.V_8_0_0 */ + @UpdateForV9 public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_7_0_0; public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersions.V_7_0_0; @@ -61,6 +63,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA PersistentTasksService persistentTasksService; @Inject + @UpdateForV9 // Once we begin working on 9.x, we need to update our migration classes public TransportGetFeatureUpgradeStatusAction( TransportService transportService, ThreadPool threadPool, @@ -82,8 +85,6 @@ public TransportGetFeatureUpgradeStatusAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); - assert Version.CURRENT.major == 8 : "Once we begin working on 9.x, we need to update our migration classes"; - this.systemIndices = systemIndices; this.persistentTasksService = persistentTasksService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 6c10a6a07cba6..6e700ca4aecc3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -40,7 +40,7 @@ */ public class NodeInfo extends BaseNodeResponse { - private final Version version; + private final String version; private final TransportVersion transportVersion; private final IndexVersion indexVersion; private final Map componentVersions; @@ -61,16 +61,23 @@ public class NodeInfo extends BaseNodeResponse { public NodeInfo(StreamInput in) throws IOException { super(in); - version = Version.readVersion(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_VERSION_AS_STRING)) { + version = in.readString(); transportVersion = TransportVersion.readVersion(in); - } else { - transportVersion = TransportVersion.fromId(version.id); - } - if (in.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_INDEX_VERSION_ADDED)) { indexVersion = IndexVersion.readVersion(in); } else { - indexVersion = IndexVersion.fromId(version.id); + Version legacyVersion = Version.readVersion(in); + version = legacyVersion.toString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + transportVersion = TransportVersion.readVersion(in); + } else { + transportVersion = TransportVersion.fromId(legacyVersion.id); + } + if (in.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_INDEX_VERSION_ADDED)) { + indexVersion = IndexVersion.readVersion(in); + } else { + indexVersion = IndexVersion.fromId(legacyVersion.id); + } } if (in.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_COMPONENT_VERSIONS_ADDED)) { componentVersions = in.readImmutableMap(StreamInput::readString, StreamInput::readVInt); @@ -105,7 +112,7 @@ public NodeInfo(StreamInput in) throws IOException { } public NodeInfo( - Version version, + String version, TransportVersion transportVersion, IndexVersion indexVersion, Map componentVersions, @@ -156,7 +163,7 @@ public String getHostname() { * The current ES version */ public String getVersion() { - return version.toString(); + return version; } /** @@ -227,7 +234,11 @@ private void 
addInfoIfNonNull(Class clazz, @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - Version.writeVersion(version, out); + if (out.getTransportVersion().onOrAfter(TransportVersions.NODE_INFO_VERSION_AS_STRING)) { + out.writeString(version); + } else { + Version.writeVersion(Version.fromString(version), out); + } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { TransportVersion.writeVersion(transportVersion, out); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 9f9613e7834a0..c19ff7ea3e46e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.node.stats; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.common.Strings; @@ -20,37 +19,27 @@ import java.io.IOException; import java.util.Arrays; -import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.stream.Collectors; /** * A request to get node (cluster) level stats. */ public class NodesStatsRequest extends BaseNodesRequest { - private CommonStatsFlags indices = new CommonStatsFlags(); - private final Set requestedMetrics = new HashSet<>(); - private boolean includeShardsStats = true; + private NodesStatsRequestParameters nodesStatsRequestParameters; public NodesStatsRequest() { super((String[]) null); + nodesStatsRequestParameters = new NodesStatsRequestParameters(); } public NodesStatsRequest(StreamInput in) throws IOException { super(in); - indices = new CommonStatsFlags(in); - requestedMetrics.clear(); - requestedMetrics.addAll(in.readStringCollectionAsList()); - if (in.getTransportVersion().onOrAfter(TransportVersions.INCLUDE_SHARDS_STATS_ADDED)) { - includeShardsStats = in.readBoolean(); - } else { - includeShardsStats = true; - } + nodesStatsRequestParameters = new NodesStatsRequestParameters(in); } /** @@ -59,14 +48,15 @@ public NodesStatsRequest(StreamInput in) throws IOException { */ public NodesStatsRequest(String... nodesIds) { super(nodesIds); + nodesStatsRequestParameters = new NodesStatsRequestParameters(); } /** * Sets all the request flags. */ public NodesStatsRequest all() { - this.indices.all(); - this.requestedMetrics.addAll(Metric.allMetrics()); + this.nodesStatsRequestParameters.indices().all(); + this.nodesStatsRequestParameters.requestedMetrics().addAll(NodesStatsRequestParameters.Metric.allMetrics()); return this; } @@ -74,28 +64,28 @@ public NodesStatsRequest all() { * Clears all the request flags. */ public NodesStatsRequest clear() { - this.indices.clear(); - this.requestedMetrics.clear(); + this.nodesStatsRequestParameters.indices().clear(); + this.nodesStatsRequestParameters.requestedMetrics().clear(); return this; } /** - * Get indices. Handles separately from other metrics because it may or + * Get nodesStatsMetrics.indices(). Handles separately from other metrics because it may or * may not have submetrics. 
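The distinction called out in this javadoc is the reason for the two code paths: indices stats are flag-based and carry submetrics, while every other stat is just a name in the flat metric set. A usage sketch, with a placeholder node id:

    import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
    import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;

    class IndicesVsMetricsSketch {
        static NodesStatsRequest build() {
            NodesStatsRequest request = new NodesStatsRequest("node-1");
            // indices stats: configured through CommonStatsFlags, with submetrics
            request.indices(new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store));
            // everything else: a flat set of metric names
            request.addMetric("jvm").addMetric("os");
            return request;
        }
    }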
* @return flags indicating which indices stats to return */ public CommonStatsFlags indices() { - return indices; + return nodesStatsRequestParameters.indices(); } /** - * Set indices. Handles separately from other metrics because it may or + * Set nodesStatsMetrics.indices(). Handles separately from other metrics because it may or * may not involve submetrics. * @param indices flags indicating which indices stats to return * @return This object, for request chaining. */ public NodesStatsRequest indices(CommonStatsFlags indices) { - this.indices = indices; + nodesStatsRequestParameters.setIndices(indices); return this; } @@ -104,9 +94,9 @@ public NodesStatsRequest indices(CommonStatsFlags indices) { */ public NodesStatsRequest indices(boolean indices) { if (indices) { - this.indices.all(); + this.nodesStatsRequestParameters.indices().all(); } else { - this.indices.clear(); + this.nodesStatsRequestParameters.indices().clear(); } return this; } @@ -116,17 +106,17 @@ public NodesStatsRequest indices(boolean indices) { * handled separately. */ public Set requestedMetrics() { - return Set.copyOf(requestedMetrics); + return Set.copyOf(nodesStatsRequestParameters.requestedMetrics()); } /** * Add metric */ public NodesStatsRequest addMetric(String metric) { - if (Metric.allMetrics().contains(metric) == false) { + if (NodesStatsRequestParameters.Metric.allMetrics().contains(metric) == false) { throw new IllegalStateException("Used an illegal metric: " + metric); } - requestedMetrics.add(metric); + nodesStatsRequestParameters.requestedMetrics().add(metric); return this; } @@ -136,12 +126,12 @@ public NodesStatsRequest addMetric(String metric) { public NodesStatsRequest addMetrics(String... metrics) { // use sorted set for reliable ordering in error messages SortedSet metricsSet = new TreeSet<>(Set.of(metrics)); - if (Metric.allMetrics().containsAll(metricsSet) == false) { - metricsSet.removeAll(Metric.allMetrics()); + if (NodesStatsRequestParameters.Metric.allMetrics().containsAll(metricsSet) == false) { + metricsSet.removeAll(NodesStatsRequestParameters.Metric.allMetrics()); String plural = metricsSet.size() == 1 ? "" : "s"; throw new IllegalStateException("Used illegal metric" + plural + ": " + metricsSet); } - requestedMetrics.addAll(metricsSet); + nodesStatsRequestParameters.requestedMetrics().addAll(metricsSet); return this; } @@ -149,10 +139,10 @@ public NodesStatsRequest addMetrics(String... 
metrics) { * Remove metric */ public NodesStatsRequest removeMetric(String metric) { - if (Metric.allMetrics().contains(metric) == false) { + if (NodesStatsRequestParameters.Metric.allMetrics().contains(metric) == false) { throw new IllegalStateException("Used an illegal metric: " + metric); } - requestedMetrics.remove(metric); + nodesStatsRequestParameters.requestedMetrics().remove(metric); return this; } @@ -161,8 +151,8 @@ public String getDescription() { return Strings.format( "nodes=%s, metrics=%s, flags=%s", Arrays.toString(nodesIds()), - requestedMetrics.toString(), - Arrays.toString(indices.getFlags()) + nodesStatsRequestParameters.requestedMetrics().toString(), + Arrays.toString(nodesStatsRequestParameters.indices().getFlags()) ); } @@ -177,60 +167,17 @@ public String getDescription() { } public boolean includeShardsStats() { - return includeShardsStats; + return nodesStatsRequestParameters.includeShardsStats(); } public void setIncludeShardsStats(boolean includeShardsStats) { - this.includeShardsStats = includeShardsStats; + nodesStatsRequestParameters.setIncludeShardsStats(includeShardsStats); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - indices.writeTo(out); - out.writeStringCollection(requestedMetrics); - if (out.getTransportVersion().onOrAfter(TransportVersions.INCLUDE_SHARDS_STATS_ADDED)) { - out.writeBoolean(includeShardsStats); - } + nodesStatsRequestParameters.writeTo(out); } - /** - * An enumeration of the "core" sections of metrics that may be requested - * from the nodes stats endpoint. Eventually this list will be pluggable. - */ - public enum Metric { - OS("os"), - PROCESS("process"), - JVM("jvm"), - THREAD_POOL("thread_pool"), - FS("fs"), - TRANSPORT("transport"), - HTTP("http"), - BREAKER("breaker"), - SCRIPT("script"), - DISCOVERY("discovery"), - INGEST("ingest"), - ADAPTIVE_SELECTION("adaptive_selection"), - SCRIPT_CACHE("script_cache"), - INDEXING_PRESSURE("indexing_pressure"), - REPOSITORIES("repositories"); - - private String metricName; - - Metric(String name) { - this.metricName = name; - } - - public String metricName() { - return this.metricName; - } - - boolean containedIn(Set metricNames) { - return metricNames.contains(this.metricName()); - } - - static Set allMetrics() { - return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index 1cfa92d91e9f1..48f90ccc712fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -47,12 +47,12 @@ public NodesStatsRequestBuilder setIndices(boolean indices) { } public NodesStatsRequestBuilder setBreaker(boolean breaker) { - addOrRemoveMetric(breaker, NodesStatsRequest.Metric.BREAKER); + addOrRemoveMetric(breaker, NodesStatsRequestParameters.Metric.BREAKER); return this; } public NodesStatsRequestBuilder setScript(boolean script) { - addOrRemoveMetric(script, NodesStatsRequest.Metric.SCRIPT); + addOrRemoveMetric(script, NodesStatsRequestParameters.Metric.SCRIPT); return this; } @@ -68,7 +68,7 @@ public NodesStatsRequestBuilder setIndices(CommonStatsFlags indices) { * Should the node OS stats be returned. 
*/ public NodesStatsRequestBuilder setOs(boolean os) { - addOrRemoveMetric(os, NodesStatsRequest.Metric.OS); + addOrRemoveMetric(os, NodesStatsRequestParameters.Metric.OS); return this; } @@ -76,7 +76,7 @@ public NodesStatsRequestBuilder setOs(boolean os) { * Should the node OS stats be returned. */ public NodesStatsRequestBuilder setProcess(boolean process) { - addOrRemoveMetric(process, NodesStatsRequest.Metric.PROCESS); + addOrRemoveMetric(process, NodesStatsRequestParameters.Metric.PROCESS); return this; } @@ -84,7 +84,7 @@ public NodesStatsRequestBuilder setProcess(boolean process) { * Should the node JVM stats be returned. */ public NodesStatsRequestBuilder setJvm(boolean jvm) { - addOrRemoveMetric(jvm, NodesStatsRequest.Metric.JVM); + addOrRemoveMetric(jvm, NodesStatsRequestParameters.Metric.JVM); return this; } @@ -92,7 +92,7 @@ public NodesStatsRequestBuilder setJvm(boolean jvm) { * Should the node thread pool stats be returned. */ public NodesStatsRequestBuilder setThreadPool(boolean threadPool) { - addOrRemoveMetric(threadPool, NodesStatsRequest.Metric.THREAD_POOL); + addOrRemoveMetric(threadPool, NodesStatsRequestParameters.Metric.THREAD_POOL); return this; } @@ -100,7 +100,7 @@ public NodesStatsRequestBuilder setThreadPool(boolean threadPool) { * Should the node file system stats be returned. */ public NodesStatsRequestBuilder setFs(boolean fs) { - addOrRemoveMetric(fs, NodesStatsRequest.Metric.FS); + addOrRemoveMetric(fs, NodesStatsRequestParameters.Metric.FS); return this; } @@ -108,7 +108,7 @@ public NodesStatsRequestBuilder setFs(boolean fs) { * Should the node Transport stats be returned. */ public NodesStatsRequestBuilder setTransport(boolean transport) { - addOrRemoveMetric(transport, NodesStatsRequest.Metric.TRANSPORT); + addOrRemoveMetric(transport, NodesStatsRequestParameters.Metric.TRANSPORT); return this; } @@ -116,7 +116,7 @@ public NodesStatsRequestBuilder setTransport(boolean transport) { * Should the node HTTP stats be returned. */ public NodesStatsRequestBuilder setHttp(boolean http) { - addOrRemoveMetric(http, NodesStatsRequest.Metric.HTTP); + addOrRemoveMetric(http, NodesStatsRequestParameters.Metric.HTTP); return this; } @@ -124,7 +124,7 @@ public NodesStatsRequestBuilder setHttp(boolean http) { * Should the discovery stats be returned. */ public NodesStatsRequestBuilder setDiscovery(boolean discovery) { - addOrRemoveMetric(discovery, NodesStatsRequest.Metric.DISCOVERY); + addOrRemoveMetric(discovery, NodesStatsRequestParameters.Metric.DISCOVERY); return this; } @@ -132,12 +132,12 @@ public NodesStatsRequestBuilder setDiscovery(boolean discovery) { * Should ingest statistics be returned. 
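All of these boolean setters funnel through addOrRemoveMetric, so passing true registers the corresponding metric name on the underlying request and passing false removes it. A call-site sketch, with placeholder client and node id:

    import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
    import org.elasticsearch.client.internal.Client;

    class BuilderTogglesSketch {
        static NodesStatsRequestBuilder jvmAndThreadPoolOnly(Client client) {
            return client.admin().cluster()
                .prepareNodesStats("node-1")
                .setJvm(true)        // -> addMetric("jvm")
                .setThreadPool(true) // -> addMetric("thread_pool")
                .setIngest(false);   // -> removeMetric("ingest")
        }
    }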
*/ public NodesStatsRequestBuilder setIngest(boolean ingest) { - addOrRemoveMetric(ingest, NodesStatsRequest.Metric.INGEST); + addOrRemoveMetric(ingest, NodesStatsRequestParameters.Metric.INGEST); return this; } public NodesStatsRequestBuilder setAdaptiveSelection(boolean adaptiveSelection) { - addOrRemoveMetric(adaptiveSelection, NodesStatsRequest.Metric.ADAPTIVE_SELECTION); + addOrRemoveMetric(adaptiveSelection, NodesStatsRequestParameters.Metric.ADAPTIVE_SELECTION); return this; } @@ -145,24 +145,24 @@ public NodesStatsRequestBuilder setAdaptiveSelection(boolean adaptiveSelection) * Should script context cache statistics be returned */ public NodesStatsRequestBuilder setScriptCache(boolean scriptCache) { - addOrRemoveMetric(scriptCache, NodesStatsRequest.Metric.SCRIPT_CACHE); + addOrRemoveMetric(scriptCache, NodesStatsRequestParameters.Metric.SCRIPT_CACHE); return this; } public NodesStatsRequestBuilder setIndexingPressure(boolean indexingPressure) { - addOrRemoveMetric(indexingPressure, NodesStatsRequest.Metric.INDEXING_PRESSURE); + addOrRemoveMetric(indexingPressure, NodesStatsRequestParameters.Metric.INDEXING_PRESSURE); return this; } public NodesStatsRequestBuilder setRepositoryStats(boolean repositoryStats) { - addOrRemoveMetric(repositoryStats, NodesStatsRequest.Metric.REPOSITORIES); + addOrRemoveMetric(repositoryStats, NodesStatsRequestParameters.Metric.REPOSITORIES); return this; } /** * Helper method for adding metrics to a request */ - private void addOrRemoveMetric(boolean includeMetric, NodesStatsRequest.Metric metric) { + private void addOrRemoveMetric(boolean includeMetric, NodesStatsRequestParameters.Metric metric) { if (includeMetric) { request.addMetric(metric.metricName()); } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java new file mode 100644 index 0000000000000..6c7ce472475cd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.node.stats; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This class encapsulates the metrics and other information needed to define scope when we are requesting node stats. 
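Besides relocating the fields, the class defined just below fixes the BwC behaviour of the newest flag in one place: includeShardsStats only crosses the wire when both sides are on INCLUDE_SHARDS_STATS_ADDED or later, and anything read from an older sender falls back to the legacy default. A sketch of the resulting round-trip behaviour (transport plumbing elided):

    import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters;

    // new writer -> new reader : includeShardsStats arrives exactly as sent
    // new writer -> old reader : the boolean is simply never written
    // old writer -> new reader : nothing to read, the field defaults to true
    NodesStatsRequestParameters params = new NodesStatsRequestParameters();
    params.setIncludeShardsStats(false); // only survives on new-enough streams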
+ */ +public class NodesStatsRequestParameters implements Writeable { + private CommonStatsFlags indices = new CommonStatsFlags(); + private final Set requestedMetrics = new HashSet<>(); + private boolean includeShardsStats = true; + + public NodesStatsRequestParameters() {} + + public NodesStatsRequestParameters(StreamInput in) throws IOException { + indices = new CommonStatsFlags(in); + requestedMetrics.clear(); + requestedMetrics.addAll(in.readStringCollectionAsList()); + if (in.getTransportVersion().onOrAfter(TransportVersions.INCLUDE_SHARDS_STATS_ADDED)) { + includeShardsStats = in.readBoolean(); + } else { + includeShardsStats = true; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + indices.writeTo(out); + out.writeStringCollection(requestedMetrics); + if (out.getTransportVersion().onOrAfter(TransportVersions.INCLUDE_SHARDS_STATS_ADDED)) { + out.writeBoolean(includeShardsStats); + } + } + + public CommonStatsFlags indices() { + return indices; + } + + public void setIndices(CommonStatsFlags indices) { + this.indices = indices; + } + + public Set requestedMetrics() { + return requestedMetrics; + } + + public boolean includeShardsStats() { + return includeShardsStats; + } + + public void setIncludeShardsStats(boolean includeShardsStats) { + this.includeShardsStats = includeShardsStats; + } + + /** + * An enumeration of the "core" sections of metrics that may be requested + * from the nodes stats endpoint. Eventually this list will be pluggable. + */ + public enum Metric { + OS("os"), + PROCESS("process"), + JVM("jvm"), + THREAD_POOL("thread_pool"), + FS("fs"), + TRANSPORT("transport"), + HTTP("http"), + BREAKER("breaker"), + SCRIPT("script"), + DISCOVERY("discovery"), + INGEST("ingest"), + ADAPTIVE_SELECTION("adaptive_selection"), + SCRIPT_CACHE("script_cache"), + INDEXING_PRESSURE("indexing_pressure"), + REPOSITORIES("repositories"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + + boolean containedIn(Set metricNames) { + return metricNames.contains(this.metricName()); + } + + static Set allMetrics() { + return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index b9ab520c4da8d..96fc30f93c890 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -84,21 +84,21 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest, Task task) return nodeService.stats( request.indices(), request.includeShardsStats(), - NodesStatsRequest.Metric.OS.containedIn(metrics), - NodesStatsRequest.Metric.PROCESS.containedIn(metrics), - NodesStatsRequest.Metric.JVM.containedIn(metrics), - NodesStatsRequest.Metric.THREAD_POOL.containedIn(metrics), - NodesStatsRequest.Metric.FS.containedIn(metrics), - NodesStatsRequest.Metric.TRANSPORT.containedIn(metrics), - NodesStatsRequest.Metric.HTTP.containedIn(metrics), - NodesStatsRequest.Metric.BREAKER.containedIn(metrics), - NodesStatsRequest.Metric.SCRIPT.containedIn(metrics), - NodesStatsRequest.Metric.DISCOVERY.containedIn(metrics), - NodesStatsRequest.Metric.INGEST.containedIn(metrics), - 
NodesStatsRequest.Metric.ADAPTIVE_SELECTION.containedIn(metrics), - NodesStatsRequest.Metric.SCRIPT_CACHE.containedIn(metrics), - NodesStatsRequest.Metric.INDEXING_PRESSURE.containedIn(metrics), - NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) + NodesStatsRequestParameters.Metric.OS.containedIn(metrics), + NodesStatsRequestParameters.Metric.PROCESS.containedIn(metrics), + NodesStatsRequestParameters.Metric.JVM.containedIn(metrics), + NodesStatsRequestParameters.Metric.THREAD_POOL.containedIn(metrics), + NodesStatsRequestParameters.Metric.FS.containedIn(metrics), + NodesStatsRequestParameters.Metric.TRANSPORT.containedIn(metrics), + NodesStatsRequestParameters.Metric.HTTP.containedIn(metrics), + NodesStatsRequestParameters.Metric.BREAKER.containedIn(metrics), + NodesStatsRequestParameters.Metric.SCRIPT.containedIn(metrics), + NodesStatsRequestParameters.Metric.DISCOVERY.containedIn(metrics), + NodesStatsRequestParameters.Metric.INGEST.containedIn(metrics), + NodesStatsRequestParameters.Metric.ADAPTIVE_SELECTION.containedIn(metrics), + NodesStatsRequestParameters.Metric.SCRIPT_CACHE.containedIn(metrics), + NodesStatsRequestParameters.Metric.INDEXING_PRESSURE.containedIn(metrics), + NodesStatsRequestParameters.Metric.REPOSITORIES.containedIn(metrics) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java deleted file mode 100644 index 361d4509ed95b..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.node.usage; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class NodesUsageRequestBuilder extends NodesOperationRequestBuilder< - NodesUsageRequest, - NodesUsageResponse, - NodesUsageRequestBuilder> { - - public NodesUsageRequestBuilder(ElasticsearchClient client, ActionType action) { - super(client, action, new NodesUsageRequest()); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index 498350b766448..412a34a6e1562 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -20,13 +20,6 @@ public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< AcknowledgedResponse, DeleteRepositoryRequestBuilder> { - /** - * Constructs unregister repository request builder - */ - public DeleteRepositoryRequestBuilder(ElasticsearchClient client, DeleteRepositoryAction action) { - super(client, action, new DeleteRepositoryRequest()); - } - /** * Constructs unregister repository request builder with specified repository name */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index 9ef6b5ca8a3d5..6a0d4a5e126f1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -20,13 +20,6 @@ public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationReques GetRepositoriesResponse, GetRepositoriesRequestBuilder> { - /** - * Creates new get repository request builder - */ - public GetRepositoriesRequestBuilder(ElasticsearchClient client, GetRepositoriesAction action) { - super(client, action, new GetRepositoriesRequest()); - } - /** * Creates new get repository request builder */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 0ef45712e5051..21401ba986674 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -24,13 +24,6 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< AcknowledgedResponse, PutRepositoryRequestBuilder> { - /** - * Constructs register repository request - */ - public PutRepositoryRequestBuilder(ElasticsearchClient client, PutRepositoryAction action) { - super(client, action, new PutRepositoryRequest()); - } - /** * Constructs register repository request for the repository with a given name */ diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index 7c40030f14c00..dc6257b222ab2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -19,13 +19,6 @@ public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBu VerifyRepositoryResponse, VerifyRepositoryRequestBuilder> { - /** - * Constructs unregister repository request builder - */ - public VerifyRepositoryRequestBuilder(ElasticsearchClient client, VerifyRepositoryAction action) { - super(client, action, new VerifyRepositoryRequest()); - } - /** * Constructs unregister repository request builder with specified repository name */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 25d3c53521345..ae6ec9a5b3c49 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -23,13 +23,6 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil CreateSnapshotResponse, CreateSnapshotRequestBuilder> { - /** - * Constructs a new create snapshot request builder - */ - public CreateSnapshotRequestBuilder(ElasticsearchClient client, CreateSnapshotAction action) { - super(client, action, new CreateSnapshotRequest()); - } - /** * Constructs a new create snapshot request builder with specified repository and snapshot names */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 3ceab6badcaa8..4046c0bc7dd03 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -20,13 +20,6 @@ public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuil AcknowledgedResponse, DeleteSnapshotRequestBuilder> { - /** - * Constructs delete snapshot request builder - */ - public DeleteSnapshotRequestBuilder(ElasticsearchClient client, DeleteSnapshotAction action) { - super(client, action, new DeleteSnapshotRequest()); - } - /** * Constructs delete snapshot request builder with specified repository and snapshot names */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 04e67a86e4a7a..49cc5df049332 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -25,13 +25,6 @@ public class RestoreSnapshotRequestBuilder 
extends MasterNodeOperationRequestBui RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> { - /** - * Constructs new restore snapshot request builder - */ - public RestoreSnapshotRequestBuilder(ElasticsearchClient client, RestoreSnapshotAction action) { - super(client, action, new RestoreSnapshotRequest()); - } - /** * Constructs new restore snapshot request builder with specified repository and snapshot names */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/DenseVectorFieldStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/DenseVectorFieldStats.java index e27972a60cbc9..099c299e0114e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/DenseVectorFieldStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/DenseVectorFieldStats.java @@ -19,6 +19,8 @@ * Holds enhanced stats about a dense vector mapped field. */ public final class DenseVectorFieldStats extends FieldStats { + static final int UNSET = -1; + int indexedVectorCount; // number of times vectors with index:true are used in mappings of this cluster int indexedVectorDimMin; // minimum dimension of indexed vectors in this cluster int indexedVectorDimMax; // maximum dimension of indexed vectors in this cluster @@ -26,8 +28,8 @@ public final class DenseVectorFieldStats extends FieldStats { DenseVectorFieldStats(String name) { super(name); indexedVectorCount = 0; - indexedVectorDimMin = 1024; - indexedVectorDimMax = 0; + indexedVectorDimMin = UNSET; + indexedVectorDimMax = UNSET; } DenseVectorFieldStats(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index 197a5d839eecf..e2ade5060c476 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -87,13 +87,17 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { stats = fieldTypes.computeIfAbsent(type, DenseVectorFieldStats::new); boolean indexed = fieldMapping.containsKey("index") ? 
(boolean) fieldMapping.get("index") : false; if (indexed) { - ((DenseVectorFieldStats) stats).indexedVectorCount += count; - int dims = (int) fieldMapping.get("dims"); - if (dims < ((DenseVectorFieldStats) stats).indexedVectorDimMin) { - ((DenseVectorFieldStats) stats).indexedVectorDimMin = dims; - } - if (dims > ((DenseVectorFieldStats) stats).indexedVectorDimMax) { - ((DenseVectorFieldStats) stats).indexedVectorDimMax = dims; + DenseVectorFieldStats vStats = (DenseVectorFieldStats) stats; + vStats.indexedVectorCount += count; + Object obj = fieldMapping.get("dims"); + if (obj != null) { + int dims = (int) obj; + if (vStats.indexedVectorDimMin == DenseVectorFieldStats.UNSET || dims < vStats.indexedVectorDimMin) { + vStats.indexedVectorDimMin = dims; + } + if (vStats.indexedVectorDimMin == DenseVectorFieldStats.UNSET || dims > vStats.indexedVectorDimMax) { + vStats.indexedVectorDimMax = dims; + } } } } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index ee6797ca58fb9..9d10065c9c3e9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -22,6 +23,7 @@ import java.io.IOException; import java.util.Map; +@UpdateForV9 // make this class a regular ActionRequest rather than a MasterNodeReadRequest public class GetAliasesRequest extends MasterNodeReadRequest implements AliasesRequest { public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandHidden(); @@ -40,9 +42,10 @@ public GetAliasesRequest() {} /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until we no - * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. Once we remove this we can - * also make this class a regular ActionRequest instead of a MasterNodeReadRequest. + * longer need to support calling this action remotely. Once we remove this we can also make this class a regular ActionRequest instead + * of a MasterNodeReadRequest. 
*/ + @UpdateForV9 // remove this constructor public GetAliasesRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index c0e26b16585c4..edb05b0fcef75 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.DataStreamAlias; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.List; @@ -38,8 +39,9 @@ public Map> getDataStreamAliases() { /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until we no - * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. + * longer need to support calling this action remotely. */ + @UpdateForV9 // replace this implementation with TransportAction.localOnly() @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(aliases, StreamOutput::writeCollection); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index e43d1a825c233..9b9fb49c1bbe0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; import org.elasticsearch.tasks.CancellableTask; @@ -41,9 +42,9 @@ /** * NB prior to 8.12 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService (i.e. a - * HandledTransportAction) until we no longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and - * earlier. + * HandledTransportAction) until we no longer need to support calling this action remotely. 
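The annotation applied just after this comment is the @UpdateForV9 idiom used throughout the change: rather than a runtime assert or a TODO comment, the cleanup site is marked with a source-level annotation that can be audited once 9.x development starts. A minimal sketch of the pattern; the shim class, method, and default below are hypothetical:

    import org.elasticsearch.core.UpdateForV9;

    class BwcShim {
        @UpdateForV9 // delete once this action no longer needs to be callable remotely
        static boolean readLegacyFlag() {
            return true; // stand-in for a wire-compatibility default
        }
    }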
*/ +@UpdateForV9 // remove the HandledTransportAction superclass, this action need not be registered with the TransportService public class TransportGetAliasesAction extends TransportLocalClusterStateAction<GetAliasesRequest, GetAliasesResponse> { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportGetAliasesAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java index 7e6b31271ae90..3d6bf0ff15bb1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -18,10 +18,6 @@ */ public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> { - public CloseIndexRequestBuilder(ElasticsearchClient client, CloseIndexAction action) { - super(client, action, new CloseIndexRequest()); - } - public CloseIndexRequestBuilder(ElasticsearchClient client, CloseIndexAction action, String... indices) { super(client, action, new CloseIndexRequest(indices)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 1cec71d2abe53..87334afa3ed8a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -50,6 +50,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -111,7 +112,7 @@ public TransportAction( this.taskQueue = clusterService.createTaskQueue("auto-create", Priority.URGENT, batchExecutionContext -> { final var listener = new AllocationActionMultiListener<CreateIndexResponse>(threadPool.getThreadContext()); final var taskContexts = batchExecutionContext.taskContexts(); - final var successfulRequests = Maps.<CreateIndexRequest, String>newMapWithExpectedSize(taskContexts.size()); + final var successfulRequests = Maps.<CreateIndexRequest, List<String>>newMapWithExpectedSize(taskContexts.size()); var state = batchExecutionContext.initialState(); for (final var taskContext : taskContexts) { final var task = taskContext.getTask(); @@ -169,6 +170,13 @@ public void onFailure(Exception e) { private ClusterStateAckListener getAckListener( String indexName, AllocationActionMultiListener<CreateIndexResponse> allocationActionMultiListener + ) { + return getAckListener(List.of(indexName), allocationActionMultiListener); + } + + private ClusterStateAckListener getAckListener( + List<String> indexNames, + AllocationActionMultiListener<CreateIndexResponse> allocationActionMultiListener ) { return new ClusterStateAckListener() { @Override @@ -180,22 +188,22 @@ public boolean mustAck(DiscoveryNode discoveryNode) { public void onAllNodesAcked() { ActiveShardsObserver.waitForActiveShards( clusterService, - new String[] { indexName }, + indexNames.toArray(String[]::new), ActiveShardCount.DEFAULT, request.timeout(), allocationActionMultiListener.delay(listener) - .map(shardsAcked -> new CreateIndexResponse(true, shardsAcked, indexName)) + .map(shardsAcked -> new CreateIndexResponse(true, shardsAcked, indexNames.get(0))) ); } @Override public void onAckFailure(Exception e) { - allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexName)); +
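// not acked: respond with an unacknowledged CreateIndexResponse carrying the first index name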
allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexNames.get(0))); } @Override public void onAckTimeout() { - allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexName)); + allocationActionMultiListener.delay(listener).onResponse(new CreateIndexResponse(false, false, indexNames.get(0))); } @Override @@ -212,7 +220,7 @@ public TimeValue ackTimeout() { */ ClusterState execute( ClusterState currentState, - Map successfulRequests, + Map> successfulRequests, ClusterStateTaskExecutor.TaskContext taskContext, AllocationActionMultiListener allocationActionMultiListener ) throws Exception { @@ -255,9 +263,13 @@ ClusterState execute( rerouteCompletionIsNotRequired() ); - final var indexName = clusterState.metadata().dataStreams().get(request.index()).getIndices().get(0).getName(); - taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + final var dataStream = clusterState.metadata().dataStreams().get(request.index()); + final var backingIndexName = dataStream.getIndices().get(0).getName(); + final var indexNames = dataStream.getFailureIndices().isEmpty() + ? List.of(backingIndexName) + : List.of(backingIndexName, dataStream.getFailureIndices().get(0).getName()); + taskContext.success(getAckListener(indexNames, allocationActionMultiListener)); + successfulRequests.put(request, indexNames); return clusterState; } else { final var indexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); @@ -272,7 +284,7 @@ ClusterState execute( if (shouldAutoCreate == false) { // The index already exists. taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + successfulRequests.put(request, List.of(indexName)); return currentState; } } @@ -318,7 +330,7 @@ ClusterState execute( rerouteCompletionIsNotRequired() ); taskContext.success(getAckListener(indexName, allocationActionMultiListener)); - successfulRequests.put(request, indexName); + successfulRequests.put(request, List.of(indexName)); return clusterState; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index d3773a49df4dc..29af167679451 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -79,20 +79,18 @@ protected void shardOperationOnPrimary( IndexShard primary, ActionListener> listener ) { - ActionListener.completeWith(listener, () -> { - primary.flush(shardRequest.getRequest()); + primary.flush(shardRequest.getRequest(), listener.map(flushed -> { logger.trace("{} flush request executed on primary", primary.shardId()); return new PrimaryResult<>(shardRequest, new ReplicationResponse()); - }); + })); } @Override protected void shardOperationOnReplica(ShardFlushRequest request, IndexShard replica, ActionListener listener) { - ActionListener.completeWith(listener, () -> { - replica.flush(request.getRequest()); + replica.flush(request.getRequest(), listener.map(flushed -> { logger.trace("{} flush request executed on replica", replica.shardId()); return new ReplicaResult(); - }); + })); } // TODO: Remove this transition in 9.0 diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java index 060ead9deb246..85a31925901d4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequestBuilder.java @@ -18,10 +18,6 @@ */ public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder { - public OpenIndexRequestBuilder(ElasticsearchClient client, OpenIndexAction action) { - super(client, action, new OpenIndexRequest()); - } - public OpenIndexRequestBuilder(ElasticsearchClient client, OpenIndexAction action, String... indices) { super(client, action, new OpenIndexRequest(indices)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index b613eab0d731c..19fa9c3d359fb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -126,6 +126,7 @@ protected void masterOperation( ) .settings(requestSettings) .setPreserveExisting(request.isPreserveExisting()) + .reopenShards(request.reopen()) .ackTimeout(request.timeout()) .masterNodeTimeout(request.masterNodeTimeout()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java index f52c659ea55f4..99a43c6594c62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java @@ -22,6 +22,8 @@ public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterState private boolean preserveExisting = false; + private boolean reopenShards = false; + /** * Returns true iff the settings update should only add but not update settings. If the setting already exists * it should not be overwritten by this update. The default is false @@ -30,6 +32,20 @@ public boolean isPreserveExisting() { return preserveExisting; } + /** + * Returns true if non-dynamic setting updates should go through, by automatically unassigning shards in the same cluster + * state change as the setting update. The shards will be automatically reassigned after the cluster state update is made. The + * default is false. + */ + public boolean reopenShards() { + return reopenShards; + } + + public UpdateSettingsClusterStateUpdateRequest reopenShards(boolean reopenShards) { + this.reopenShards = reopenShards; + return this; + } + /** * Iff set to true this settings update will only add settings not already set on an index. Existing settings remain * unchanged. 
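For context, a minimal usage sketch of the new flag (hypothetical caller code; "my-index" and the index.codec value are illustrative, and requestSettings is the variable from the TransportUpdateSettingsAction hunk above):

    // Hypothetical request: update a non-dynamic setting by letting the same
    // cluster-state task unassign and then reassign the index's shards.
    UpdateSettingsRequest request = new UpdateSettingsRequest("my-index").settings(
        Settings.builder().put("index.codec", "best_compression")
    );
    request.reopen(true); // new in this change; defaults to false

    // TransportUpdateSettingsAction then copies the flag onto the cluster-state
    // update request, as shown in the hunk earlier in this diff:
    UpdateSettingsClusterStateUpdateRequest update = new UpdateSettingsClusterStateUpdateRequest()
        .settings(requestSettings)
        .setPreserveExisting(request.isPreserveExisting())
        .reopenShards(request.reopen());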
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 4e31fbc2b5732..013e568eff7c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -47,6 +47,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsRequest> + private boolean reopen = false; + /** + * Returns true if non-dynamic setting updates should go through, by automatically unassigning shards in the same cluster + * state change as the setting update. The shards will be automatically reassigned after the cluster state update is made. The + * default is false. + */ + public boolean reopen() { + return reopen; + } + + public void reopen(boolean reopen) { + this.reopen = reopen; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -186,6 +203,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { out.writeString(origin); } + if (out.getTransportVersion().onOrAfter(TransportVersions.UPDATE_NON_DYNAMIC_SETTINGS_ADDED)) { + out.writeBoolean(reopen); + } } @Override @@ -243,12 +263,13 @@ public boolean equals(Object o) { && Objects.equals(settings, that.settings) && Objects.equals(indicesOptions, that.indicesOptions) && Objects.equals(preserveExisting, that.preserveExisting) + && Objects.equals(reopen, that.reopen) && Arrays.equals(indices, that.indices); } @Override public int hashCode() { - return Objects.hash(masterNodeTimeout, timeout, settings, indicesOptions, preserveExisting, Arrays.hashCode(indices)); + return Objects.hash(masterNodeTimeout, timeout, settings, indicesOptions, preserveExisting, reopen, Arrays.hashCode(indices)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index 9f8ac48feb861..a203f810ebf3d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -16,10 +16,6 @@ public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder<DeleteIndexTemplateRequest, AcknowledgedResponse, DeleteIndexTemplateRequestBuilder> { - public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action) { - super(client, action, new DeleteIndexTemplateRequest()); - } - public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action, String name) { super(client, action, new DeleteIndexTemplateRequest(name)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index 8eb9d0b93e6b1..194ac7b77f65c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -15,10 +15,6 @@ public class GetIndexTemplatesRequestBuilder extends
MasterNodeReadOperationRequ GetIndexTemplatesResponse, GetIndexTemplatesRequestBuilder> { - public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action) { - super(client, action, new GetIndexTemplatesRequest()); - } - public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action, String... names) { super(client, action, new GetIndexTemplatesRequest(names)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 4ab0b6bd221e9..af40637db6703 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -53,6 +53,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findV2Template; @@ -69,6 +70,7 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea private final SystemIndices systemIndices; private final Set indexSettingProviders; private final ClusterSettings clusterSettings; + private final boolean isDslOnlyMode; @Inject public TransportSimulateIndexTemplateAction( @@ -100,6 +102,7 @@ public TransportSimulateIndexTemplateAction( this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } @Override @@ -146,6 +149,7 @@ protected void masterOperation( matchingTemplate, request.getIndexName(), stateWithTemplate, + isDslOnlyMode, xContentRegistry, indicesService, systemIndices, @@ -218,6 +222,7 @@ public static Template resolveTemplate( final String matchingTemplate, final String indexName, final ClusterState simulatedState, + final boolean isDslOnlyMode, final NamedXContentRegistry xContentRegistry, final IndicesService indicesService, final SystemIndices systemIndices, @@ -304,6 +309,9 @@ public static Template resolveTemplate( Settings settings = Settings.builder().put(templateSettings).put(additionalSettings.build()).build(); DataStreamLifecycle lifecycle = resolveLifecycle(simulatedState.metadata(), matchingTemplate); + if (template.getDataStreamTemplate() != null && lifecycle == null && isDslOnlyMode) { + lifecycle = DataStreamLifecycle.DEFAULT; + } return new Template(settings, mergedMapping, aliasesByName, lifecycle); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index b99f436dd86f9..1f35d0b8a1268 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -39,6 +39,7 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; @@ -56,6 +57,7 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi private final SystemIndices systemIndices; private final Set indexSettingProviders; private final ClusterSettings clusterSettings; + private final boolean isDslOnlyMode; @Inject public TransportSimulateTemplateAction( @@ -87,6 +89,7 @@ public TransportSimulateTemplateAction( this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } @Override @@ -162,6 +165,7 @@ protected void masterOperation( matchingTemplate, temporaryIndexName, stateWithTemplate, + isDslOnlyMode, xContentRegistry, indicesService, systemIndices, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index 17439f2312036..7dc19ff52ce84 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -24,10 +24,6 @@ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBu AcknowledgedResponse, PutIndexTemplateRequestBuilder> { - public PutIndexTemplateRequestBuilder(ElasticsearchClient client, PutIndexTemplateAction action) { - super(client, action, new PutIndexTemplateRequest()); - } - public PutIndexTemplateRequestBuilder(ElasticsearchClient client, PutIndexTemplateAction action, String name) { super(client, action, new PutIndexTemplateRequest(name)); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 76259d899c90a..c2b6c666d829a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.ingest.SimulateIndexResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -505,7 +506,9 @@ public void writeThin(StreamOutput out) throws IOException { } private void writeResponseType(StreamOutput out) throws IOException { - if (response instanceof IndexResponse) { + if (response instanceof SimulateIndexResponse) { + out.writeByte((byte) 4); + } else if (response instanceof IndexResponse) { out.writeByte((byte) 0); } else if (response instanceof DeleteResponse) { out.writeByte((byte) 1); @@ -523,6 +526,7 @@ private static DocWriteResponse readResponse(ShardId 
shardId, StreamInput in) th case 1 -> new DeleteResponse(shardId, in); case 2 -> null; case 3 -> new UpdateResponse(shardId, in); + case 4 -> new SimulateIndexResponse(in); default -> throw new IllegalArgumentException("Unexpected type [" + type + "]"); }; } @@ -534,6 +538,7 @@ private static DocWriteResponse readResponse(StreamInput in) throws IOException case 1 -> new DeleteResponse(in); case 2 -> null; case 3 -> new UpdateResponse(in); + case 4 -> new SimulateIndexResponse(in); default -> throw new IllegalArgumentException("Unexpected type [" + type + "]"); }; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index 3b6e69d16bae3..f1280587a0c55 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -19,6 +18,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.rest.action.document.RestBulkAction; @@ -430,32 +430,32 @@ public void parse( } } + @UpdateForV9 + // Warnings will need to be replaced with XContentEOFException from 9.x + private static void warnBulkActionNotProperlyClosed(String message) { + deprecationLogger.compatibleCritical(STRICT_ACTION_PARSING_WARNING_KEY, message); + } + private static void checkBulkActionIsProperlyClosed(XContentParser parser) throws IOException { XContentParser.Token token; try { token = parser.nextToken(); } catch (XContentEOFException ignore) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action wasn't closed properly with the closing brace. Malformed objects are currently accepted but will be " + "rejected in a future version." ); return; } if (token != XContentParser.Token.END_OBJECT) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action object contained multiple keys. Additional keys are currently ignored but will be rejected in a " + "future version." ); return; } if (parser.nextToken() != null) { - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; - deprecationLogger.compatibleCritical( - STRICT_ACTION_PARSING_WARNING_KEY, + warnBulkActionNotProperlyClosed( "A bulk action contained trailing data after the closing brace. This is currently ignored but will be rejected in a " + "future version." 
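// per the @UpdateForV9 note above, these lenient warnings become hard XContentEOFException-style failures from 9.x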
); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index 6503c207e8290..33fb81a6520cb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -66,7 +66,7 @@ public PlainActionFuture withBackoff( BiConsumer> consumer, BulkRequest bulkRequest ) { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); withBackoff(consumer, bulkRequest, future); return future; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java similarity index 55% rename from server/src/main/java/org/elasticsearch/action/search/SearchAction.java rename to server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java index 61d7a3355dc8f..a799c60fe7b38 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkAction.java @@ -6,17 +6,16 @@ * Side Public License, v 1. */ -package org.elasticsearch.action.search; +package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionType; -public class SearchAction extends ActionType { +public class SimulateBulkAction extends ActionType { - public static final SearchAction INSTANCE = new SearchAction(); - public static final String NAME = "indices:data/read/search"; + public static final SimulateBulkAction INSTANCE = new SimulateBulkAction(); + public static final String NAME = "indices:data/write/simulate/bulk"; - private SearchAction() { - super(NAME, SearchResponse::new); + private SimulateBulkAction() { + super(NAME, BulkResponse::new); } - } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java new file mode 100644 index 0000000000000..c167c88954b38 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; +import java.util.Map; + +/** + * This extends BulkRequest with support for providing substitute pipeline definitions. In a user request, the pipeline substitutions + * will look something like this: + * + * "pipeline_substitutions": { + * "my-pipeline-1": { + * "processors": [ + * { + * "set": { + * "field": "my-new-boolean-field", + * "value": true + * } + * } + * ] + * }, + * "my-pipeline-2": { + * "processors": [ + * { + * "set": { + * "field": "my-new-boolean-field", + * "value": true + * }, + * "rename": { + * "field": "old_field", + * "target_field": "new field" + * } + * } + * ] + * } + * } + * + * The pipelineSubstitutions Map held by this class is intended to be the result of XContentHelper.convertToMap(). 
The top-level keys + * are the pipelineIds ("my-pipeline-1" and "my-pipeline-2" in the example above). The values are the Maps of "processors" to the List of + * processor definitions. + */ +public class SimulateBulkRequest extends BulkRequest { + private final Map> pipelineSubstitutions; + + /** + * @param pipelineSubstitutions The pipeline definitions that are to be used in place of any pre-existing pipeline definitions with + * the same pipelineId. The key of the map is the pipelineId, and the value the pipeline definition as + * parsed by XContentHelper.convertToMap(). + */ + public SimulateBulkRequest(@Nullable Map> pipelineSubstitutions) { + super(); + this.pipelineSubstitutions = pipelineSubstitutions; + } + + @SuppressWarnings("unchecked") + public SimulateBulkRequest(StreamInput in) throws IOException { + super(in); + this.pipelineSubstitutions = (Map>) in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeGenericValue(pipelineSubstitutions); + } + + public Map> getPipelineSubstitutions() { + return pipelineSubstitutions; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 13d10be86bd68..b89b5e2de7924 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -15,9 +15,9 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; @@ -46,6 +46,7 @@ import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Assertions; @@ -93,6 +94,7 @@ public class TransportBulkAction extends HandledTransportAction bulkAction; private final ThreadPool threadPool; private final ClusterService clusterService; private final IngestService ingestService; @@ -142,8 +144,39 @@ public TransportBulkAction( SystemIndices systemIndices, LongSupplier relativeTimeProvider ) { - super(BulkAction.NAME, transportService, actionFilters, BulkRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this( + BulkAction.INSTANCE, + BulkRequest::new, + threadPool, + transportService, + clusterService, + ingestService, + client, + actionFilters, + indexNameExpressionResolver, + indexingPressure, + systemIndices, + relativeTimeProvider + ); + } + + TransportBulkAction( + ActionType bulkAction, + Writeable.Reader requestReader, + ThreadPool threadPool, + TransportService transportService, + ClusterService clusterService, + IngestService ingestService, + NodeClient client, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndexingPressure indexingPressure, + SystemIndices systemIndices, + LongSupplier relativeTimeProvider + ) { + 
super(bulkAction.name(), transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); Objects.requireNonNull(relativeTimeProvider); + this.bulkAction = bulkAction; this.threadPool = threadPool; this.clusterService = clusterService; this.ingestService = ingestService; @@ -268,11 +301,9 @@ protected void doRun() { protected void doInternalExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener listener) { final long startTime = relativeTime(); - final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); boolean hasIndexRequestsWithPipelines = false; final Metadata metadata = clusterService.state().getMetadata(); - final Version minNodeVersion = clusterService.state().getNodes().getMinNodeVersion(); for (DocWriteRequest actionRequest : bulkRequest.requests) { IndexRequest indexRequest = getIndexWriteRequest(actionRequest); if (indexRequest != null) { @@ -281,7 +312,6 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec } if (actionRequest instanceof IndexRequest ir) { - ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion); if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) { throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally"); } @@ -304,7 +334,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec if (clusterService.localNode().isIngestNode()) { processBulkIndexIngestRequest(task, bulkRequest, executorName, l); } else { - ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, l); + ingestForwarder.forwardIngestRequest(bulkAction, bulkRequest, l); } }); return; @@ -336,6 +366,30 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec } // Step 3: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back. + createMissingIndicesAndIndexData( + task, + bulkRequest, + executorName, + listener, + autoCreateIndices, + indicesThatCannotBeCreated, + startTime + ); + } + + /* + * This method is responsible for creating any missing indices and indexing the data in the BulkRequest + */ + protected void createMissingIndicesAndIndexData( + Task task, + BulkRequest bulkRequest, + String executorName, + ActionListener listener, + Set autoCreateIndices, + Map indicesThatCannotBeCreated, + long startTime + ) { + final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); if (autoCreateIndices.isEmpty()) { executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); } else { @@ -386,6 +440,14 @@ protected void doRun() { } } + /* + * This returns the IngestService to be used for the given request. The default implementation ignores the request and always returns + * the same ingestService, but child classes might use information in the request in creating an IngestService specific to that request. 
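+ * TransportSimulateBulkAction (added in this change) does exactly that, wrapping the shared service in a SimulateIngestService that carries the request's pipeline substitutions.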
+ */ + protected IngestService getIngestService(BulkRequest request) { + return ingestService; + } + static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, Metadata metadata) { DocWriteRequest.OpType opType = writeRequest.opType(); if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { @@ -491,7 +553,7 @@ private static boolean setResponseFailureIfIndexMatches( return false; } - private long buildTookInMillis(long startTimeNanos) { + protected long buildTookInMillis(long startTimeNanos) { return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos); } @@ -809,7 +871,7 @@ private void processBulkIndexIngestRequest( ) { final long ingestStartTimeInNanos = System.nanoTime(); final BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); - ingestService.executeBulkRequest( + getIngestService(original).executeBulkRequest( original.numberOfActions(), () -> bulkRequestModifier, bulkRequestModifier::markItemAsDropped, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java new file mode 100644 index 0000000000000..7e2fef88c7680 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.SimulateIndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.SimulateIngestService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Map; +import java.util.Set; + +public class TransportSimulateBulkAction extends TransportBulkAction { + @Inject + public TransportSimulateBulkAction( + ThreadPool threadPool, + TransportService transportService, + ClusterService clusterService, + IngestService ingestService, + NodeClient client, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndexingPressure indexingPressure, + SystemIndices systemIndices + ) { + super( + SimulateBulkAction.INSTANCE, + SimulateBulkRequest::new, + threadPool, + transportService, + clusterService, + ingestService, + client, + actionFilters, + indexNameExpressionResolver, + indexingPressure, + systemIndices, + System::nanoTime + ); + } + + /* + * This overrides indexData in TransportBulkAction in order to _not_ actually 
create any indices or index any data. Instead, each + request gets a corresponding CREATE response, using information from the request. + */ + @Override + protected void createMissingIndicesAndIndexData( + Task task, + BulkRequest bulkRequest, + String executorName, + ActionListener<BulkResponse> listener, + Set<String> autoCreateIndices, + Map<String, IndexNotFoundException> indicesThatCannotBeCreated, + long startTime + ) { + final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size()); + for (int i = 0; i < bulkRequest.requests.size(); i++) { + DocWriteRequest<?> request = bulkRequest.requests.get(i); + assert request instanceof IndexRequest; // This action is only ever called with IndexRequests + responses.set( + i, + BulkItemResponse.success( + 0, + DocWriteRequest.OpType.CREATE, + new SimulateIndexResponse( + request.id(), + request.index(), + request.version(), + ((IndexRequest) request).source(), + ((IndexRequest) request).getContentType(), + ((IndexRequest) request).getExecutedPipelines() + ) + ) + ); + } + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime))); + } + + /* + * This overrides TransportBulkAction's getIngestService to allow us to provide an IngestService that handles pipeline + * substitutions defined in the request. + */ + @Override + protected IngestService getIngestService(BulkRequest request) { + IngestService rawIngestService = super.getIngestService(request); + return new SimulateIngestService(rawIngestService, request); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 9c1fb63a6b8d0..7530fc18acb59 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -307,6 +307,24 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.endArray(); } builder.field(DataStream.GENERATION_FIELD.getPreferredName(), dataStream.getGeneration()); + if (DataStream.isFailureStoreEnabled()) { + builder.field(DataStream.FAILURE_INDICES_FIELD.getPreferredName()); + builder.startArray(); + for (Index failureStore : dataStream.getFailureIndices()) { + builder.startObject(); + failureStore.toXContentFragment(builder); + IndexProperties indexProperties = indexSettingsValues.get(failureStore); + if (indexProperties != null) { + builder.field(PREFER_ILM.getPreferredName(), indexProperties.preferIlm()); + if (indexProperties.ilmPolicyName() != null) { + builder.field(ILM_POLICY_FIELD.getPreferredName(), indexProperties.ilmPolicyName()); + } + builder.field(MANAGED_BY.getPreferredName(), indexProperties.managedBy.displayValue); + } + builder.endObject(); + } + builder.endArray(); + } if (dataStream.getMetadata() != null) { builder.field(DataStream.METADATA_FIELD.getPreferredName(), dataStream.getMetadata()); } @@ -327,6 +345,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.field(SYSTEM_FIELD.getPreferredName(), dataStream.isSystem()); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), dataStream.isAllowCustomRouting()); builder.field(REPLICATED.getPreferredName(), dataStream.isReplicated()); + if (DataStream.isFailureStoreEnabled()) { + builder.field(DataStream.FAILURE_STORE_FIELD.getPreferredName(), dataStream.isFailureStore()); + } if (timeSeries != null) {
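// render the resolved time-series temporal ranges for this data stream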
builder.startObject(TIME_SERIES.getPreferredName()); builder.startArray(TEMPORAL_RANGES.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index 5cfdd2b796b14..29f8e4aba35f8 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -21,10 +21,6 @@ public class DeleteRequestBuilder extends ReplicationRequestBuilder { - public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action) { - super(client, action, new DeleteRequest()); - } - public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action, @Nullable String index) { super(client, action, new DeleteRequest(index)); } diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index c2008823b0523..a2f4d6408a3a4 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -8,14 +8,12 @@ package org.elasticsearch.action.downsample; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; @@ -164,10 +162,4 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, DownsampleAction action) { - super(client, action, new Request()); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java deleted file mode 100644 index 7668a48d623da..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.explain; - -import org.elasticsearch.action.ActionType; - -/** - * Entry point for the explain feature. 
- */ -public class ExplainAction extends ActionType { - - public static final ExplainAction INSTANCE = new ExplainAction(); - public static final String NAME = "indices:data/read/explain"; - - private ExplainAction() { - super(NAME, ExplainResponse::new); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java index 4a9ae67c60e1e..9ae05687649ea 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequestBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.explain; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; @@ -20,11 +21,7 @@ */ public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder { - ExplainRequestBuilder(ElasticsearchClient client, ExplainAction action) { - super(client, action, new ExplainRequest()); - } - - public ExplainRequestBuilder(ElasticsearchClient client, ExplainAction action, String index, String id) { + public ExplainRequestBuilder(ElasticsearchClient client, ActionType action, String index, String id) { super(client, action, new ExplainRequest().index(index).id(id)); } diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 0b6a0a3276646..d889f8fac8113 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; @@ -48,6 +49,7 @@ // TODO: AggregatedDfs. Currently the idf can be different then when executing a normal search with explain. public class TransportExplainAction extends TransportSingleShardAction { + public static final ActionType TYPE = new ActionType<>("indices:data/read/explain", ExplainResponse::new); private final SearchService searchService; @Inject @@ -60,7 +62,7 @@ public TransportExplainAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - ExplainAction.NAME, + TYPE.name(), threadPool, clusterService, transportService, diff --git a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java index 7af64bed9f3cb..6871c60f11a15 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -88,13 +88,6 @@ public long getPrimaryTerm() { return getResult.getPrimaryTerm(); } - /** - * The source of the document if exists. - */ - public byte[] getSourceAsBytes() { - return getResult.source(); - } - /** * Returns the internal source bytes, as they are returned without munging (for example, * might still be compressed). 
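Callers migrating off the removed getSourceAsBytes() helper can use the accessors GetResponse keeps (a hedged sketch; response stands for any GetResponse whose document exists):

    // Same bytes the removed helper returned, via the remaining BytesReference accessor:
    byte[] sourceBytes = BytesReference.toBytes(response.getSourceAsBytesRef());

    // Or the parsed form, which the getSource() change below now also routes through sourceAsMap():
    Map<String, Object> sourceMap = response.getSourceAsMap();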
@@ -132,7 +125,7 @@ public Map getSourceAsMap() throws ElasticsearchParseException { } public Map getSource() { - return getResult.getSource(); + return getResult.sourceAsMap(); } public Map getFields() { diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 2f202dd21ad7c..8b5e077fd85b8 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -676,35 +676,14 @@ public void reset() { autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP; } - public void checkAutoIdWithOpTypeCreateSupportedByVersion(TransportVersion version) { - if (id == null && opType == OpType.CREATE && version.before(TransportVersions.V_7_5_0)) { - throw new IllegalArgumentException( - "optype create not supported for indexing requests without explicit id below transport version 7500099, current version " - + version - ); - } - } - - public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { - if (id == null && opType == OpType.CREATE && version.before(Version.V_7_5_0)) { - throw new IllegalArgumentException( - "optype create not supported for indexing requests without explicit id until all nodes are on version 7.5.0 or higher," - + " current version " - + version - ); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { - checkAutoIdWithOpTypeCreateSupportedByVersion(out.getTransportVersion()); super.writeTo(out); writeBody(out); } @Override public void writeThin(StreamOutput out) throws IOException { - checkAutoIdWithOpTypeCreateSupportedByVersion(out.getTransportVersion()); super.writeThin(out); writeBody(out); } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 9dccdfc64620e..a9c0c8ef42380 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -37,7 +37,7 @@ public class IndexResponse extends DocWriteResponse { * information about the pipelines executed. An empty list means that there were no pipelines executed. 
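* SimulateIndexResponse (added in this change) further assumes the list is always populated in simulate mode, as its innerToXContent asserts.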
*/ @Nullable - private final List executedPipelines; + protected final List executedPipelines; public IndexResponse(ShardId shardId, StreamInput in) throws IOException { super(shardId, in); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java index 32e1154a8af0a..fdc0e7ba42d92 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java @@ -14,10 +14,6 @@ public class DeletePipelineRequestBuilder extends ActionRequestBuilder { - public DeletePipelineRequestBuilder(ElasticsearchClient client, DeletePipelineAction action) { - super(client, action, new DeletePipelineRequest()); - } - public DeletePipelineRequestBuilder(ElasticsearchClient client, DeletePipelineAction action, String id) { super(client, action, new DeletePipelineRequest(id)); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java index 9d11fddc5f92b..48d5fa0f0968a 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java @@ -16,10 +16,6 @@ public class GetPipelineRequestBuilder extends MasterNodeReadOperationRequestBui GetPipelineResponse, GetPipelineRequestBuilder> { - public GetPipelineRequestBuilder(ElasticsearchClient client, GetPipelineAction action) { - super(client, action, new GetPipelineRequest()); - } - public GetPipelineRequestBuilder(ElasticsearchClient client, GetPipelineAction action, String[] ids) { super(client, action, new GetPipelineRequest(ids)); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java index 0a68e13a24465..f7a90b94d37ca 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java @@ -16,10 +16,6 @@ public class PutPipelineRequestBuilder extends ActionRequestBuilder { - public PutPipelineRequestBuilder(ElasticsearchClient client, PutPipelineAction action) { - super(client, action, new PutPipelineRequest()); - } - public PutPipelineRequestBuilder( ElasticsearchClient client, PutPipelineAction action, diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java new file mode 100644 index 0000000000000..3363f3caa164b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; + +/** + * This is an IndexResponse that is specifically for simulate requests. Unlike typical IndexResponses, we need to include the original + * source in a SimulateIndexResponse, and don't need most other fields. This has to extend IndexResponse though so that it can be used by + * BulkItemResponse in IngestService. + */ +public class SimulateIndexResponse extends IndexResponse { + private final BytesReference source; + private final XContentType sourceXContentType; + + @SuppressWarnings("this-escape") + public SimulateIndexResponse(StreamInput in) throws IOException { + super(in); + this.source = in.readBytesReference(); + this.sourceXContentType = XContentType.valueOf(in.readString()); + setShardInfo(new ReplicationResponse.ShardInfo(0, 0)); + } + + @SuppressWarnings("this-escape") + public SimulateIndexResponse( + String id, + String index, + long version, + BytesReference source, + XContentType sourceXContentType, + List pipelines + ) { + // We don't actually care about most of the IndexResponse fields: + super(new ShardId(index, "", 0), id == null ? "" : id, 0, 0, version, true, pipelines); + this.source = source; + this.sourceXContentType = sourceXContentType; + setShardInfo(new ReplicationResponse.ShardInfo(0, 0)); + } + + @Override + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("_id", getId()); + builder.field("_index", getShardId().getIndexName()); + builder.field("_version", getVersion()); + builder.field("_source", XContentHelper.convertToMap(source, false, sourceXContentType).v2()); + assert executedPipelines != null : "executedPipelines is null when it shouldn't be - we always list pipelines in simulate mode"; + builder.array("executed_pipelines", executedPipelines.toArray()); + return builder; + } + + @Override + public RestStatus status() { + return RestStatus.CREATED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBytesReference(source); + out.writeString(sourceXContentType.name()); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("SimulateIndexResponse["); + builder.append("index=").append(getIndex()); + try { + builder.append(",source=").append(XContentHelper.convertToJson(source, false, sourceXContentType)); + } catch (IOException e) { + throw new RuntimeException(e); + } + builder.append(",pipelines=[").append(String.join(", ", executedPipelines)); + return builder.append("]]").toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java index 92ee01f552da4..93f5ab9b78913 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java @@ -15,13 +15,6 @@ public class SimulatePipelineRequestBuilder extends ActionRequestBuilder { - /** - * Create a new builder for {@link SimulatePipelineRequest}s - */ - public SimulatePipelineRequestBuilder(ElasticsearchClient client, SimulatePipelineAction action) { - super(client, action, new SimulatePipelineRequest()); - } - /** * Create a new builder for {@link SimulatePipelineRequest}s */ diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index b56cb0ca5926c..82c2f020a0962 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -160,13 +160,16 @@ abstract class AbstractSearchAsyncAction exten this.executor = executor; this.request = request; this.task = task; - this.listener = ActionListener.runAfter(listener, this::releaseContext); + this.listener = ActionListener.runAfter(listener, () -> Releasables.close(releasables)); this.nodeIdToConnection = nodeIdToConnection; this.concreteIndexBoosts = concreteIndexBoosts; this.clusterStateVersion = clusterState.version(); this.minTransportVersion = clusterState.getMinTransportVersion(); this.aliasFilter = aliasFilter; this.results = resultConsumer; + // register the release of the query consumer to free up the circuit breaker memory + // at the end of the search + addReleasable(resultConsumer::decRef); this.clusters = clusters; } @@ -189,10 +192,6 @@ public void addReleasable(Releasable releasable) { releasables.add(releasable); } - public void releaseContext() { - Releasables.close(releasables); - } - /** * Builds how long it took to execute the search. */ @@ -260,7 +259,7 @@ private boolean checkMinimumVersion(GroupShardsIterator sha if (it.getTargetNodeIds().isEmpty() == false) { boolean isCompatible = it.getTargetNodeIds().stream().anyMatch(nodeId -> { Transport.Connection conn = getConnection(it.getClusterAlias(), nodeId); - return conn == null ? true : conn.getVersion().onOrAfter(request.minCompatibleShardNode()); + return conn == null || conn.getNode().getVersion().onOrAfter(request.minCompatibleShardNode()); }); if (isCompatible == false) { return false; @@ -746,7 +745,7 @@ final void onPhaseDone() { // as a tribute to @kimchy aka. 
finishHim() public final Transport.Connection getConnection(String clusterAlias, String nodeId) { Transport.Connection conn = nodeIdToConnection.apply(clusterAlias, nodeId); Version minVersion = request.minCompatibleShardNode(); - if (minVersion != null && conn != null && conn.getVersion().before(minVersion)) { + if (minVersion != null && conn != null && conn.getNode().getVersion().before(minVersion)) { throw new VersionMismatchException("One of the shards is incompatible with the required minimum version [{}]", minVersion); } return conn; diff --git a/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java index 9f61042320f3e..b4fd0107f731f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java @@ -9,7 +9,10 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.transport.LeakTracker; import java.util.stream.Stream; @@ -19,6 +22,8 @@ class ArraySearchPhaseResults extends SearchPhaseResults { final AtomicArray results; + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(this::doClose)); + ArraySearchPhaseResults(int size) { super(size); this.results = new AtomicArray<>(size); @@ -32,9 +37,16 @@ Stream getSuccessfulResults() { void consumeResult(Result result, Runnable next) { assert results.get(result.getShardIndex()) == null : "shardIndex: " + result.getShardIndex() + " is already set"; results.set(result.getShardIndex(), result); + result.incRef(); next.run(); } + protected void doClose() { + for (Result result : getAtomicArray().asList()) { + result.decRef(); + } + } + boolean hasResult(int shardIndex) { return results.get(shardIndex) != null; } @@ -43,4 +55,24 @@ boolean hasResult(int shardIndex) { AtomicArray getAtomicArray() { return results; } + + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 6e553f254ee8b..9900ee9d824ae 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -379,7 +379,7 @@ private boolean checkMinimumVersion(GroupShardsIterator sha if (it.getTargetNodeIds().isEmpty() == false) { boolean isCompatible = it.getTargetNodeIds().stream().anyMatch(nodeId -> { Transport.Connection conn = getConnection(new SendingTarget(it.getClusterAlias(), nodeId)); - return conn == null || conn.getVersion().onOrAfter(request.minCompatibleShardNode()); + return conn == null || conn.getNode().getVersion().onOrAfter(request.minCompatibleShardNode()); }); if (isCompatible == false) { return false; @@ -419,7 +419,7 @@ public void onPhaseFailure(String msg, Exception cause) { public Transport.Connection 
getConnection(SendingTarget sendingTarget) { Transport.Connection conn = nodeIdToConnection.apply(sendingTarget.clusterAlias, sendingTarget.nodeId); Version minVersion = request.minCompatibleShardNode(); - if (minVersion != null && conn != null && conn.getVersion().before(minVersion)) { + if (minVersion != null && conn != null && conn.getNode().getVersion().before(minVersion)) { throw new VersionMismatchException("One of the shards is incompatible with the required minimum version [{}]", minVersion); } return conn; @@ -480,6 +480,26 @@ synchronized FixedBitSet getPossibleMatches() { Stream getSuccessfulResults() { return Stream.empty(); } + + @Override + public void incRef() { + + } + + @Override + public boolean tryIncRef() { + return false; + } + + @Override + public boolean decRef() { + return false; + } + + @Override + public boolean hasReferences() { + return false; + } } private GroupShardsIterator getIterator( diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java deleted file mode 100644 index ceee61bc47934..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class ClearScrollAction extends ActionType { - - public static final ClearScrollAction INSTANCE = new ClearScrollAction(); - public static final String NAME = "indices:data/read/scroll/clear"; - - private ClearScrollAction() { - super(NAME, ClearScrollResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java index 42b734715bd89..2311a5f65eb40 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequestBuilder.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionType; import org.elasticsearch.client.internal.ElasticsearchClient; import java.util.List; public class ClearScrollRequestBuilder extends ActionRequestBuilder { - public ClearScrollRequestBuilder(ElasticsearchClient client, ClearScrollAction action) { + public ClearScrollRequestBuilder(ElasticsearchClient client, ActionType action) { super(client, action, new ClearScrollRequest()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeAction.java deleted file mode 100644 index ae9757b5b516d..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeAction.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
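A theme running through the search changes above: SearchPhaseResults becomes ref-counted, ArraySearchPhaseResults takes a reference on every per-shard result it buffers and releases them all exactly once when its own count reaches zero (with LeakTracker flagging missed releases), and the can-match results object implements the same interface as a no-op because it buffers nothing releasable. A self-contained plain-Java sketch of the pattern follows; the names are illustrative, not the Elasticsearch RefCounted machinery.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

final class ResultsHolderSketch {
    private final AtomicInteger refCount = new AtomicInteger(1); // creator holds one reference
    private final List<AutoCloseable> results = new ArrayList<>();

    void consume(AutoCloseable result) {
        results.add(result); // holder now owns the buffered per-shard result
    }

    void incRef() {
        refCount.incrementAndGet();
    }

    boolean decRef() {
        if (refCount.decrementAndGet() == 0) {
            // last reference gone: release every buffered result exactly once
            for (AutoCloseable result : results) {
                try {
                    result.close();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
            return true;
        }
        return false;
    }
}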
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class ClosePointInTimeAction extends ActionType { - - public static final ClosePointInTimeAction INSTANCE = new ClosePointInTimeAction(); - public static final String NAME = "indices:data/read/close_point_in_time"; - - private ClosePointInTimeAction() { - super(NAME, ClosePointInTimeResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java b/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java index 34b33770efd55..d5605b280f385 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java +++ b/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java @@ -25,6 +25,7 @@ final class CountedCollector { CountedCollector(ArraySearchPhaseResults resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) { this.resultConsumer = resultConsumer; + resultConsumer.incRef(); this.counter = new CountDown(expectedOps); this.onFinish = onFinish; this.context = context; @@ -37,7 +38,11 @@ final class CountedCollector { void countDown() { assert counter.isCountedDown() == false : "more operations executed than specified"; if (counter.countDown()) { - onFinish.run(); + try { + onFinish.run(); + } finally { + resultConsumer.decRef(); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index e010e840d3f2d..ce2c86be4b4e6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -66,7 +66,7 @@ final class DfsQueryPhase extends SearchPhase { // register the release of the query consumer to free up the circuit breaker memory // at the end of the search - context.addReleasable(queryResult); + context.addReleasable(queryResult::decRef); } @Override @@ -95,7 +95,7 @@ public void run() { connection, querySearchRequest, context.getTask(), - new SearchActionListener(shardTarget, shardIndex) { + new SearchActionListener<>(shardTarget, shardIndex) { @Override protected void innerOnResponse(QuerySearchResult response) { diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index d0a4ca14ee4f3..e8d3ded154f55 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -9,10 +9,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.ScoreDoc; -import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.RescoreDocIds; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; @@ -20,7 +18,6 @@ import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; import 
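The CountedCollector change above follows the same discipline: the collector grabs a reference on the result consumer when it is created and gives it back in a finally block once the last shard counts down, so the consumer cannot be freed while fetches are still in flight, even if onFinish throws. A plain-Java sketch with illustrative names:

import java.util.concurrent.atomic.AtomicInteger;

final class CountedCollectorSketch {
    private final AtomicInteger remaining;
    private final Runnable onFinish;
    private final Runnable releaseConsumer; // stand-in for resultConsumer::decRef

    CountedCollectorSketch(int expectedOps, Runnable onFinish, Runnable acquireConsumer, Runnable releaseConsumer) {
        acquireConsumer.run(); // stand-in for resultConsumer.incRef()
        this.remaining = new AtomicInteger(expectedOps);
        this.onFinish = onFinish;
        this.releaseConsumer = releaseConsumer;
    }

    void countDown() {
        if (remaining.decrementAndGet() == 0) {
            try {
                onFinish.run();
            } finally {
                releaseConsumer.run(); // released even if onFinish throws
            }
        }
    }
}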
org.elasticsearch.search.internal.ShardSearchContextId; -import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.Transport; @@ -70,6 +67,7 @@ final class FetchSearchPhase extends SearchPhase { ); } this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards()); + context.addReleasable(fetchResults::decRef); this.queryResults = resultConsumer.getAtomicArray(); this.aggregatedDfs = aggregatedDfs; this.nextPhaseFactory = nextPhaseFactory; @@ -99,40 +97,32 @@ public void onFailure(Exception e) { private void innerRun() throws Exception { final int numShards = context.getNumShards(); - final boolean isScrollSearch = context.getRequest().scroll() != null; - final List phaseResults = queryResults.asList(); final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce(); // Usually when there is a single shard, we force the search type QUERY_THEN_FETCH. But when there's kNN, we might // still use DFS_QUERY_THEN_FETCH, which does not perform the "query and fetch" optimization during the query phase. final boolean queryAndFetchOptimization = queryResults.length() == 1 && context.getRequest().hasKnnSearch() == false && reducedQueryPhase.rankCoordinatorContext() == null; - final Runnable finishPhase = () -> moveToNextPhase( - queryResults, - reducedQueryPhase, - queryAndFetchOptimization ? queryResults : fetchResults.getAtomicArray() - ); if (queryAndFetchOptimization) { - assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null - : "phaseResults empty [" + phaseResults.isEmpty() + "], single result: " + phaseResults.get(0).fetchResult(); + assert assertConsistentWithQueryAndFetchOptimization(); // query AND fetch optimization - finishPhase.run(); + moveToNextPhase(reducedQueryPhase, queryResults); } else { ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs(); - final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); // no docs to fetch -- sidestep everything and return if (scoreDocs.length == 0) { // we have to release contexts here to free up resources - phaseResults.stream().map(SearchPhaseResult::queryResult).forEach(this::releaseIrrelevantSearchContext); - finishPhase.run(); + queryResults.asList().stream().map(SearchPhaseResult::queryResult).forEach(this::releaseIrrelevantSearchContext); + moveToNextPhase(reducedQueryPhase, fetchResults.getAtomicArray()); } else { - final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch + final ScoreDoc[] lastEmittedDocPerShard = context.getRequest().scroll() != null ? 
SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) : null; + final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); final CountedCollector counter = new CountedCollector<>( fetchResults, docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not - finishPhase, + () -> moveToNextPhase(reducedQueryPhase, fetchResults.getAtomicArray()), context ); for (int i = 0; i < docIdsToLoad.length; i++) { @@ -149,66 +139,43 @@ private void innerRun() throws Exception { // in any case we count down this result since we don't talk to this shard anymore counter.countDown(); } else { - SearchShardTarget shardTarget = queryResult.getSearchShardTarget(); - Transport.Connection connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); - ShardFetchSearchRequest fetchSearchRequest = createFetchRequest( - queryResult.queryResult().getContextId(), - i, - entry, - lastEmittedDocPerShard, - context.getOriginalIndices(queryResult.getShardIndex()), - queryResult.getShardSearchRequest(), - queryResult.getRescoreDocIds() - ); - executeFetch( - queryResult.getShardIndex(), - shardTarget, - counter, - fetchSearchRequest, - queryResult.queryResult(), - connection - ); + executeFetch(queryResult, counter, entry, (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[i] : null); } } } } } - protected ShardFetchSearchRequest createFetchRequest( - ShardSearchContextId contextId, - int index, - List entry, - ScoreDoc[] lastEmittedDocPerShard, - OriginalIndices originalIndices, - ShardSearchRequest shardSearchRequest, - RescoreDocIds rescoreDocIds - ) { - final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[index] : null; - return new ShardFetchSearchRequest( - originalIndices, - contextId, - shardSearchRequest, - entry, - lastEmittedDoc, - rescoreDocIds, - aggregatedDfs - ); + private boolean assertConsistentWithQueryAndFetchOptimization() { + var phaseResults = queryResults.asList(); + assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null + : "phaseResults empty [" + phaseResults.isEmpty() + "], single result: " + phaseResults.get(0).fetchResult(); + return true; } private void executeFetch( - final int shardIndex, - final SearchShardTarget shardTarget, + SearchPhaseResult queryResult, final CountedCollector counter, - final ShardFetchSearchRequest fetchSearchRequest, - final QuerySearchResult querySearchResult, - final Transport.Connection connection + final List entry, + ScoreDoc lastEmittedDocForShard ) { + final SearchShardTarget shardTarget = queryResult.getSearchShardTarget(); + final int shardIndex = queryResult.getShardIndex(); + final ShardSearchContextId contextId = queryResult.queryResult().getContextId(); context.getSearchTransport() .sendExecuteFetch( - connection, - fetchSearchRequest, + context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()), + new ShardFetchSearchRequest( + context.getOriginalIndices(queryResult.getShardIndex()), + contextId, + queryResult.getShardSearchRequest(), + entry, + lastEmittedDocForShard, + queryResult.getRescoreDocIds(), + aggregatedDfs + ), context.getTask(), - new SearchActionListener(shardTarget, shardIndex) { + new SearchActionListener<>(shardTarget, shardIndex) { @Override public void innerOnResponse(FetchSearchResult result) { try { @@ -222,14 +189,14 @@ public void innerOnResponse(FetchSearchResult result) { @Override public void onFailure(Exception e) { try 
{ - logger.debug(() -> "[" + fetchSearchRequest.contextId() + "] Failed to execute fetch phase", e); + logger.debug(() -> "[" + contextId + "] Failed to execute fetch phase", e); progressListener.notifyFetchFailure(shardIndex, shardTarget, e); counter.onFailure(shardIndex, shardTarget, e); } finally { // the search context might not be cleared on the node where the fetch was executed for example // because the action was rejected by the thread pool. in this case we need to send a dedicated // request to clear the search context. - releaseIrrelevantSearchContext(querySearchResult); + releaseIrrelevantSearchContext(queryResult.queryResult()); } } } @@ -260,16 +227,14 @@ private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { } private void moveToNextPhase( - AtomicArray queryPhaseResults, SearchPhaseController.ReducedQueryPhase reducedQueryPhase, AtomicArray fetchResultsArr ) { final InternalSearchResponse internalResponse = SearchPhaseController.merge( context.getRequest().scroll() != null, reducedQueryPhase, - fetchResultsArr.asList(), - fetchResultsArr::get + fetchResultsArr ); - context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryPhaseResults)); + context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryResults)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java deleted file mode 100644 index faea4b88e5c78..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
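Two things are worth pulling out of the FetchSearchPhase rewrite above. First, the failure path: a failed fetch still counts its shard down and, in a finally block, releases the now-irrelevant remote search context, because a fetch that was rejected by a thread pool may never have cleared that context on the data node. Second, moveToNextPhase now hands merge the AtomicArray of fetch results directly instead of a list plus a separate lookup function over the same data. A plain-Java sketch of the failure path, illustrative names only:

final class FetchFailureSketch {
    private final Runnable countDown;      // stand-in for counter.onFailure(...)
    private final Runnable releaseContext; // stand-in for releaseIrrelevantSearchContext(...)

    FetchFailureSketch(Runnable countDown, Runnable releaseContext) {
        this.countDown = countDown;
        this.releaseContext = releaseContext;
    }

    void onFailure(Exception e) {
        try {
            countDown.run();      // the shard counts down, result or not
        } finally {
            releaseContext.run(); // never leak the remote search context
        }
    }
}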
- */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class MultiSearchAction extends ActionType { - - public static final MultiSearchAction INSTANCE = new MultiSearchAction(); - public static final String NAME = "indices:data/read/msearch"; - - private MultiSearchAction() { - super(NAME, MultiSearchResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java index 57c536f3d371e..20888d652c8ac 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.ElasticsearchClient; @@ -17,7 +18,7 @@ */ public class MultiSearchRequestBuilder extends ActionRequestBuilder { - public MultiSearchRequestBuilder(ElasticsearchClient client, MultiSearchAction action) { + public MultiSearchRequestBuilder(ElasticsearchClient client, ActionType action) { super(client, action, new MultiSearchRequest()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeAction.java deleted file mode 100644 index 560f8aea1da5b..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class OpenPointInTimeAction extends ActionType { - public static final String NAME = "indices:data/read/open_point_in_time"; - public static final OpenPointInTimeAction INSTANCE = new OpenPointInTimeAction(); - - private OpenPointInTimeAction() { - super(NAME, OpenPointInTimeResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index 633e56b97a833..39813a883c428 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -38,6 +39,8 @@ public final class OpenPointInTimeRequest extends ActionRequest implements Indic @Nullable private String preference; + private QueryBuilder indexFilter; + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = SearchRequest.DEFAULT_INDICES_OPTIONS; public OpenPointInTimeRequest(String... 
indices) { @@ -54,6 +57,9 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { this.maxConcurrentShardRequests = in.readVInt(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) { + this.indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + } } @Override @@ -67,6 +73,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeVInt(maxConcurrentShardRequests); } + if (out.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) { + out.writeOptionalWriteable(indexFilter); + } } @Override @@ -153,6 +162,14 @@ public void maxConcurrentShardRequests(int maxConcurrentShardRequests) { this.maxConcurrentShardRequests = maxConcurrentShardRequests; } + public void indexFilter(QueryBuilder indexFilter) { + this.indexFilter = indexFilter; + } + + public QueryBuilder indexFilter() { + return indexFilter; + } + @Override public boolean allowsRemoteIndices() { return true; diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index ee956b5179902..b7b113601560b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -39,7 +39,6 @@ import java.util.function.Consumer; import java.util.function.Supplier; -import static java.util.stream.Collectors.toCollection; import static org.elasticsearch.action.search.SearchPhaseController.getTopDocsSize; import static org.elasticsearch.action.search.SearchPhaseController.mergeTopDocs; import static org.elasticsearch.action.search.SearchPhaseController.setShardIndex; @@ -52,7 +51,7 @@ * needed to reduce the aggregations is estimated and a {@link CircuitBreakingException} is thrown if it * exceeds the maximum memory allowed in this breaker. */ -public class QueryPhaseResultConsumer extends ArraySearchPhaseResults implements Releasable { +public class QueryPhaseResultConsumer extends ArraySearchPhaseResults { private static final Logger logger = LogManager.getLogger(QueryPhaseResultConsumer.class); private final Executor executor; @@ -105,8 +104,12 @@ public QueryPhaseResultConsumer( } @Override - public void close() { - Releasables.close(pendingMerges); + protected void doClose() { + try { + super.doClose(); + } finally { + pendingMerges.close(); + } } @Override @@ -269,12 +272,9 @@ public synchronized void close() { assert circuitBreakerBytes >= 0; } - List toRelease = buffer.stream().map(b -> b::releaseAggs).collect(toCollection(ArrayList::new)); - toRelease.add(() -> { - circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); - circuitBreakerBytes = 0; - }); - Releasables.close(toRelease); + releaseBuffer(); + circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); + circuitBreakerBytes = 0; if (hasPendingMerges()) { // This is a theoretically unreachable exception. 
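The indexFilter plumbing above uses the standard wire-compatibility pattern: the new optional field is written and read only when the stream's negotiated transport version is at least PIT_WITH_INDEX_FILTER, so mixed-version clusters never see bytes they cannot parse. A self-contained sketch of the idea with JDK stand-ins for StreamInput/StreamOutput; the version constant and the explicit boolean marker are illustrative (the real code uses writeOptionalWriteable/readOptionalNamedWriteable).

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class VersionedFieldSketch {
    static final int FILTER_VERSION = 42; // stand-in for PIT_WITH_INDEX_FILTER

    static void write(DataOutput out, int streamVersion, String filter) throws IOException {
        if (streamVersion >= FILTER_VERSION) {
            out.writeBoolean(filter != null); // "optional" marker
            if (filter != null) {
                out.writeUTF(filter);
            }
        }
        // older streams: the field is silently dropped, matching old behaviour
    }

    static String read(DataInput in, int streamVersion) throws IOException {
        if (streamVersion >= FILTER_VERSION && in.readBoolean()) {
            return in.readUTF();
        }
        return null;
    }
}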
@@ -300,11 +300,10 @@ void sortBuffer() { } } - synchronized long addWithoutBreaking(long size) { + synchronized void addWithoutBreaking(long size) { circuitBreaker.addWithoutBreaking(size); circuitBreakerBytes += size; maxAggsCurrentBufferSize = Math.max(maxAggsCurrentBufferSize, circuitBreakerBytes); - return circuitBreakerBytes; } synchronized long addEstimateAndMaybeBreak(long estimatedSize) { @@ -350,8 +349,7 @@ public void consume(QuerySearchResult result, Runnable next) { addEstimateAndMaybeBreak(aggsSize); } catch (Exception exc) { result.releaseAggs(); - buffer.forEach(QuerySearchResult::releaseAggs); - buffer.clear(); + releaseBuffer(); onMergeFailure(exc); next.run(); return; @@ -379,6 +377,11 @@ public void consume(QuerySearchResult result, Runnable next) { } } + private void releaseBuffer() { + buffer.forEach(QuerySearchResult::releaseAggs); + buffer.clear(); + } + private synchronized void onMergeFailure(Exception exc) { if (hasFailure()) { assert circuitBreakerBytes == 0; diff --git a/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java index a9da16bd62026..64702501581ea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestClosePointInTimeAction.java @@ -41,7 +41,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC clearRequest = ClosePointInTimeRequest.fromXContent(parser); } return channel -> client.execute( - ClosePointInTimeAction.INSTANCE, + TransportClosePointInTimeAction.TYPE, clearRequest, new RestToXContentListener<>(channel, ClosePointInTimeResponse::status) ); diff --git a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java index 815deac07dfcd..0e7f3f9111842 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java @@ -17,9 +17,13 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import java.io.IOException; import java.util.List; +import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; import static org.elasticsearch.rest.RestRequest.Method.POST; @ServerlessScope(Scope.PUBLIC) @@ -36,7 +40,7 @@ public List routes() { } @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indices); openRequest.indicesOptions(IndicesOptions.fromRequest(request, OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS)); @@ -50,6 +54,20 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC ); openRequest.maxConcurrentShardRequests(maxConcurrentShardRequests); } - return channel -> client.execute(OpenPointInTimeAction.INSTANCE, openRequest, new RestToXContentListener<>(channel)); + + 
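The QueryPhaseResultConsumer cleanup above tightens the circuit-breaker bookkeeping: bytes are reserved as buffered aggregation results arrive, releaseBuffer() frees the buffered results on failure, and close() hands every reserved byte back so the breaker's accounting returns to zero however the search ends. A plain-Java sketch of that accounting, with illustrative names and a stand-in for the shared CircuitBreaker:

import java.util.concurrent.atomic.AtomicLong;

final class BreakerAccountSketch {
    private final AtomicLong breaker = new AtomicLong(); // shared-breaker stand-in
    private long reserved;                               // bytes this consumer holds

    synchronized void addWithoutBreaking(long bytes) {
        breaker.addAndGet(bytes);
        reserved += bytes;
    }

    synchronized void close() {
        breaker.addAndGet(-reserved); // return everything this consumer reserved
        reserved = 0;
    }
}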
request.withContentOrSourceParamParserOrNull(parser -> { + if (parser != null) { + PARSER.parse(parser, openRequest, null); + } + }); + + return channel -> client.execute(TransportOpenPointInTimeAction.TYPE, openRequest, new RestToXContentListener<>(channel)); + } + + private static final ObjectParser PARSER = new ObjectParser<>("open_point_in_time_request"); + private static final ParseField INDEX_FILTER_FIELD = new ParseField("index_filter"); + + static { + PARSER.declareObject(OpenPointInTimeRequest::indexFilter, (p, c) -> parseTopLevelQuery(p), INDEX_FILTER_FIELD); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index c7ad250892160..2fcb792f821c9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -64,6 +64,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction clusters ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; + addReleasable(queryPhaseResultConsumer::decRef); this.progressListener = task.getProgressListener(); // don't build the SearchShard list (can be expensive) if the SearchProgressListener won't use it if (progressListener != SearchProgressListener.NOOP) { @@ -90,7 +91,7 @@ protected SearchPhase getNextPhase(final SearchPhaseResults res final List dfsSearchResults = results.getAtomicArray().asList(); final AggregatedDfs aggregatedDfs = SearchPhaseController.aggregateDfs(dfsSearchResults); final List mergedKnnResults = SearchPhaseController.mergeKnnResults(getRequest(), dfsSearchResults); - + queryPhaseResultConsumer.incRef(); return new DfsQueryPhase( dfsSearchResults, aggregatedDfs, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 5af5c4c2ec602..0662e94b519d9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; @@ -60,7 +61,6 @@ import java.util.concurrent.Executor; import java.util.function.BiFunction; import java.util.function.Consumer; -import java.util.function.IntFunction; import java.util.function.Supplier; public final class SearchPhaseController { @@ -351,52 +351,58 @@ public static List[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDo public static InternalSearchResponse merge( boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, - Collection fetchResults, - IntFunction resultsLookup + AtomicArray fetchResultsArray ) { if (reducedQueryPhase.isEmptyResult) { return InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; } ScoreDoc[] sortedDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; - SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResults, resultsLookup); - if (reducedQueryPhase.suggest != null) { - if (fetchResults.isEmpty() == false) { - int currentOffset = 
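With the parser registered above, opening a point in time can now carry a request body whose index_filter is parsed as a query. A usage sketch, not part of the change; the index, field and value are illustrative, and keep_alive is the existing required parameter.

// REST:
//
//   POST /my-index/_pit?keep_alive=1m
//   { "index_filter": { "term": { "env": "prod" } } }
//
// Or programmatically, assuming the new accessors shown above:

import org.elasticsearch.index.query.TermQueryBuilder;

OpenPointInTimeRequest request = new OpenPointInTimeRequest("my-index");
request.indexFilter(new TermQueryBuilder("env", "prod"));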
hits.getHits().length; - for (CompletionSuggestion suggestion : reducedQueryPhase.suggest.filter(CompletionSuggestion.class)) { - final List suggestionOptions = suggestion.getOptions(); - for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { - ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; - SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex); - if (searchResultProvider == null) { - // this can happen if we are hitting a shard failure during the fetch phase - // in this case we referenced the shard result via the ScoreDoc but never got a - // result from fetch. - // TODO it would be nice to assert this in the future - continue; - } - FetchSearchResult fetchResult = searchResultProvider.fetchResult(); - final int index = fetchResult.counterGetAndIncrement(); - assert index < fetchResult.hits().getHits().length - : "not enough hits fetched. index [" + index + "] length: " + fetchResult.hits().getHits().length; - SearchHit hit = fetchResult.hits().getHits()[index]; - CompletionSuggestion.Entry.Option suggestOption = suggestionOptions.get(scoreDocIndex - currentOffset); - hit.score(shardDoc.score); - hit.shard(fetchResult.getSearchShardTarget()); - suggestOption.setHit(hit); - } - currentOffset += suggestionOptions.size(); + var fetchResults = fetchResultsArray.asList(); + SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResultsArray); + if (reducedQueryPhase.suggest != null && fetchResults.isEmpty() == false) { + mergeSuggest(reducedQueryPhase, fetchResultsArray, hits, sortedDocs); + } + return reducedQueryPhase.buildResponse(hits, fetchResults); + } + + private static void mergeSuggest( + ReducedQueryPhase reducedQueryPhase, + AtomicArray fetchResultsArray, + SearchHits hits, + ScoreDoc[] sortedDocs + ) { + int currentOffset = hits.getHits().length; + for (CompletionSuggestion suggestion : reducedQueryPhase.suggest.filter(CompletionSuggestion.class)) { + final List suggestionOptions = suggestion.getOptions(); + for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { + ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; + SearchPhaseResult searchResultProvider = fetchResultsArray.get(shardDoc.shardIndex); + if (searchResultProvider == null) { + // this can happen if we are hitting a shard failure during the fetch phase + // in this case we referenced the shard result via the ScoreDoc but never got a + // result from fetch. + // TODO it would be nice to assert this in the future + continue; } - assert currentOffset == sortedDocs.length : "expected no more score doc slices"; + FetchSearchResult fetchResult = searchResultProvider.fetchResult(); + final int index = fetchResult.counterGetAndIncrement(); + assert index < fetchResult.hits().getHits().length + : "not enough hits fetched. 
index [" + index + "] length: " + fetchResult.hits().getHits().length; + SearchHit hit = fetchResult.hits().getHits()[index]; + CompletionSuggestion.Entry.Option suggestOption = suggestionOptions.get(scoreDocIndex - currentOffset); + hit.score(shardDoc.score); + hit.shard(fetchResult.getSearchShardTarget()); + suggestOption.setHit(hit); } + currentOffset += suggestionOptions.size(); } - return reducedQueryPhase.buildResponse(hits, fetchResults); + assert currentOffset == sortedDocs.length : "expected no more score doc slices"; } private static SearchHits getHits( ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, - Collection fetchResults, - IntFunction resultsLookup + AtomicArray fetchResultsArray ) { SortedTopDocs sortedTopDocs = reducedQueryPhase.sortedTopDocs; int sortScoreIndex = -1; @@ -408,6 +414,7 @@ private static SearchHits getHits( } } } + var fetchResults = fetchResultsArray.asList(); // clean the fetch counter for (SearchPhaseResult entry : fetchResults) { entry.fetchResult().initCounter(); @@ -422,7 +429,7 @@ private static SearchHits getHits( if (fetchResults.isEmpty() == false) { for (int i = 0; i < numSearchHits; i++) { ScoreDoc shardDoc = sortedTopDocs.scoreDocs[i]; - SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex); + SearchPhaseResult fetchResultProvider = fetchResultsArray.get(shardDoc.shardIndex); if (fetchResultProvider == null) { // this can happen if we are hitting a shard failure during the fetch phase // in this case we referenced the shard result via the ScoreDoc but never got a @@ -737,7 +744,7 @@ public record ReducedQueryPhase( /** * Creates a new search response from the given merged hits. - * @see #merge(boolean, ReducedQueryPhase, Collection, IntFunction) + * @see #merge(boolean, ReducedQueryPhase, AtomicArray) */ public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { return new InternalSearchResponse( @@ -753,10 +760,8 @@ public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { if (profileBuilder == null) { - assert fetchResults.stream() - .map(SearchPhaseResult::fetchResult) - .filter(r -> r != null) - .allMatch(r -> r.profileResult() == null) : "found fetch profile without search profile"; + assert fetchResults.stream().map(SearchPhaseResult::fetchResult).allMatch(r -> r == null || r.profileResult() == null) + : "found fetch profile without search profile"; return null; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java index edabbc86b4b31..11b8e0a0792a3 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.search.SearchPhaseResult; import java.util.stream.Stream; @@ -16,7 +17,7 @@ /** * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing */ -abstract class SearchPhaseResults { +abstract class SearchPhaseResults implements RefCounted { private final int numShards; SearchPhaseResults(int numShards) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchProgressActionListener.java 
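The extracted mergeSuggest and the reworked getHits above share one mechanism: every globally sorted ScoreDoc remembers which shard it came from, and each shard's fetch result hands out its hits through a cursor (counterGetAndIncrement), so walking the sorted docs reassembles the final hit list; a null entry simply means that shard failed during fetch and is skipped. A self-contained sketch with illustrative types:

import java.util.List;

record DocSketch(int shardIndex, float score) {}

final class ShardHitsSketch {
    private final List<String> hits; // hits fetched from one shard, in doc order
    private int cursor;              // "counterGetAndIncrement" stand-in

    ShardHitsSketch(List<String> hits) {
        this.hits = hits;
    }

    String next() {
        return hits.get(cursor++);
    }
}

final class MergerSketch {
    // fetchResults[i] holds the fetched hits of shard i, or null on failure
    static void merge(DocSketch[] sortedDocs, ShardHitsSketch[] fetchResults) {
        for (DocSketch doc : sortedDocs) {
            ShardHitsSketch shard = fetchResults[doc.shardIndex()];
            if (shard == null) {
                continue; // shard failed during fetch; skip it, as above
            }
            System.out.println(doc.score() + " -> " + shard.next());
        }
    }
}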
b/server/src/main/java/org/elasticsearch/action/search/SearchProgressActionListener.java index 26466215a3e85..95e9b4cedeba5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchProgressActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchProgressActionListener.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.ActionListener; /** - * An {@link ActionListener} for search requests that allows to track progress of the {@link SearchAction}. + * An {@link ActionListener} for search requests that allows to track progress of the {@link TransportSearchAction}. * See {@link SearchProgressListener}. */ public abstract class SearchProgressActionListener extends SearchProgressListener implements ActionListener {} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java index c6b0022593179..096f2606d3f02 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java @@ -23,7 +23,7 @@ import java.util.stream.StreamSupport; /** - * A listener that allows to track progress of the {@link SearchAction}. + * A listener that allows to track progress of the {@link TransportSearchAction}. */ public abstract class SearchProgressListener { private static final Logger logger = LogManager.getLogger(SearchProgressListener.class); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 2dfd46182266c..8cf4ee9b75f76 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -74,9 +74,6 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { - public SearchRequestBuilder(ElasticsearchClient client, SearchAction action) { + public SearchRequestBuilder(ElasticsearchClient client, ActionType action) { super(client, action, new SearchRequest()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java deleted file mode 100644 index 25f0daab932da..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class SearchScrollAction extends ActionType { - - public static final SearchScrollAction INSTANCE = new SearchScrollAction(); - public static final String NAME = "indices:data/read/scroll"; - - private SearchScrollAction() { - super(NAME, SearchResponse::new); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index df16c107a2619..fc1ccfb00d6ce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -240,12 +240,7 @@ protected final void sendResponse( final AtomicArray fetchResults ) { try { - final InternalSearchResponse internalResponse = SearchPhaseController.merge( - true, - queryPhase, - fetchResults.asList(), - fetchResults::get - ); + final InternalSearchResponse internalResponse = SearchPhaseController.merge(true, queryPhase, fetchResults); // the scroll ID never changes we always return the same ID. This ID contains all the shards and their context ids // such that we can talk to them again in the next roundtrip. String scrollId = null; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index bf6517e97a842..bad0ed488d03b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -99,13 +99,12 @@ public void run() { connection, shardFetchRequest, task, - new SearchActionListener(querySearchResult.getSearchShardTarget(), index) { + new SearchActionListener<>(querySearchResult.getSearchShardTarget(), index) { @Override protected void innerOnResponse(FetchSearchResult response) { fetchResults.setOnce(response.getShardIndex(), response); - if (counter.countDown()) { - sendResponse(reducedQueryPhase, fetchResults); - } + response.incRef(); + consumeResponse(counter, reducedQueryPhase); } @Override @@ -124,13 +123,20 @@ public void onFailure(Exception t) { } else { // the counter is set to the total size of docIdsToLoad // which can have null values so we have to count them down too - if (counter.countDown()) { - sendResponse(reducedQueryPhase, fetchResults); - } + consumeResponse(counter, reducedQueryPhase); } } } }; } + private void consumeResponse(CountDown counter, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) { + if (counter.countDown()) { + sendResponse(reducedQueryPhase, fetchResults); + for (FetchSearchResult fetchSearchResult : fetchResults.asList()) { + fetchSearchResult.decRef(); + } + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java index e8348d189fcbc..4de27b8430417 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionType; import 
org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.Scroll; @@ -18,11 +19,11 @@ */ public class SearchScrollRequestBuilder extends ActionRequestBuilder { - public SearchScrollRequestBuilder(ElasticsearchClient client, SearchScrollAction action) { + public SearchScrollRequestBuilder(ElasticsearchClient client, ActionType action) { super(client, action, new SearchScrollRequest()); } - public SearchScrollRequestBuilder(ElasticsearchClient client, SearchScrollAction action, String scrollId) { + public SearchScrollRequestBuilder(ElasticsearchClient client, ActionType action, String scrollId) { super(client, action, new SearchScrollRequest(scrollId)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchShardsAction.java deleted file mode 100644 index f4bfc2623fe1c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/SearchShardsAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.action.ActionType; - -public class SearchShardsAction extends ActionType { - public static final String NAME = "indices:admin/search/search_shards"; - public static final SearchShardsAction INSTANCE = new SearchShardsAction(); - - private SearchShardsAction() { - super(NAME, SearchShardsResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 800ad7afbb8db..e46d26c3532ad 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -26,10 +24,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -58,12 +54,9 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.BiFunction; /** @@ -82,7 +75,6 @@ public class SearchTransportService { public static final String QUERY_FETCH_SCROLL_ACTION_NAME = 
"indices:data/read/search[phase/query+fetch/scroll]"; public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; - public static final String QUERY_CAN_MATCH_NAME = "indices:data/read/search[can_match]"; public static final String QUERY_CAN_MATCH_NODE_NAME = "indices:data/read/search[can_match][n]"; private final TransportService transportService; @@ -137,79 +129,20 @@ public void sendFreeContext( public void sendCanMatch( Transport.Connection connection, - final ShardSearchRequest request, + final CanMatchNodeRequest request, SearchTask task, - final ActionListener listener + final ActionListener listener ) { transportService.sendChildRequest( connection, - QUERY_CAN_MATCH_NAME, + QUERY_CAN_MATCH_NODE_NAME, request, task, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, CanMatchShardResponse::new, TransportResponseHandler.TRANSPORT_WORKER) + new ActionListenerResponseHandler<>(listener, CanMatchNodeResponse::new, TransportResponseHandler.TRANSPORT_WORKER) ); } - public void sendCanMatch( - Transport.Connection connection, - final CanMatchNodeRequest request, - SearchTask task, - final ActionListener listener - ) { - if (connection.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) - && connection.getNode().getVersion().onOrAfter(Version.V_7_16_0)) { - transportService.sendChildRequest( - connection, - QUERY_CAN_MATCH_NODE_NAME, - request, - task, - TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, CanMatchNodeResponse::new, TransportResponseHandler.TRANSPORT_WORKER) - ); - } else { - // BWC layer: translate into shard-level requests - final List shardSearchRequests = request.createShardSearchRequests(); - final AtomicReferenceArray results = new AtomicReferenceArray<>( - shardSearchRequests.size() - ); - final CountDown counter = new CountDown(shardSearchRequests.size()); - final Runnable maybeFinish = () -> { - if (counter.countDown()) { - final CanMatchNodeResponse.ResponseOrFailure[] responses = - new CanMatchNodeResponse.ResponseOrFailure[shardSearchRequests.size()]; - for (int i = 0; i < responses.length; i++) { - responses[i] = results.get(i); - } - final CanMatchNodeResponse response = new CanMatchNodeResponse(Arrays.asList(responses)); - listener.onResponse(response); - } - }; - for (int i = 0; i < shardSearchRequests.size(); i++) { - final ShardSearchRequest shardSearchRequest = shardSearchRequests.get(i); - final int finalI = i; - try { - sendCanMatch(connection, shardSearchRequest, task, new ActionListener<>() { - @Override - public void onResponse(CanMatchShardResponse response) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(response)); - maybeFinish.run(); - } - - @Override - public void onFailure(Exception e) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(e)); - maybeFinish.run(); - } - }); - } catch (Exception e) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(e)); - maybeFinish.run(); - } - } - } - } - public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) { transportService.sendRequest( connection, @@ -347,7 +280,7 @@ void sendExecuteMultiSearch(final MultiSearchRequest request, SearchTask task, f final Transport.Connection connection = transportService.getConnection(transportService.getLocalNode()); transportService.sendChildRequest( connection, - 
MultiSearchAction.NAME, + TransportMultiSearchAction.TYPE.name(), request, task, new ConnectionCountingHandler<>(listener, MultiSearchResponse::new, clientConnections, connection.getNode().getId()) @@ -565,24 +498,11 @@ public static void registerRequestHandler(TransportService transportService, Sea ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); - // this is cheap, it does not fetch during the rewrite phase, so we can let it quickly execute on a networking thread - transportService.registerRequestHandler( - QUERY_CAN_MATCH_NAME, - EsExecutors.DIRECT_EXECUTOR_SERVICE, - ShardSearchRequest::new, - (request, channel, task) -> { - searchService.canMatch(request, new ChannelActionListener<>(channel)); - } - ); - TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, true, CanMatchShardResponse::new); - transportService.registerRequestHandler( QUERY_CAN_MATCH_NODE_NAME, transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION), CanMatchNodeRequest::new, - (request, channel, task) -> { - searchService.canMatch(request, new ChannelActionListener<>(channel)); - } + (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index 8de2815a9d416..e1a6bb6c42b2e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; @@ -19,6 +20,9 @@ public class TransportClearScrollAction extends HandledTransportAction { + public static final String NAME = "indices:data/read/scroll/clear"; + + public static final ActionType TYPE = new ActionType<>(NAME, ClearScrollResponse::new); private final ClusterService clusterService; private final SearchTransportService searchTransportService; @@ -29,7 +33,7 @@ public TransportClearScrollAction( ActionFilters actionFilters, SearchTransportService searchTransportService ) { - super(ClearScrollAction.NAME, transportService, actionFilters, ClearScrollRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, ClearScrollRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.clusterService = clusterService; this.searchTransportService = searchTransportService; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java index 0434cb2f5895e..338e63d6af2a6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; +import 
org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; @@ -22,6 +23,10 @@ public class TransportClosePointInTimeAction extends HandledTransportAction { + public static final ActionType TYPE = new ActionType<>( + "indices:data/read/close_point_in_time", + ClosePointInTimeResponse::new + ); private final ClusterService clusterService; private final SearchTransportService searchTransportService; private final NamedWriteableRegistry namedWriteableRegistry; @@ -34,13 +39,7 @@ public TransportClosePointInTimeAction( SearchTransportService searchTransportService, NamedWriteableRegistry namedWriteableRegistry ) { - super( - ClosePointInTimeAction.NAME, - transportService, - actionFilters, - ClosePointInTimeRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(TYPE.name(), transportService, actionFilters, ClosePointInTimeRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.clusterService = clusterService; this.searchTransportService = searchTransportService; this.namedWriteableRegistry = namedWriteableRegistry; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index a2324010876bf..be892f0a0b982 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -8,7 +8,11 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.node.NodeClient; @@ -31,6 +35,9 @@ public class TransportMultiSearchAction extends HandledTransportAction { + public static final String NAME = "indices:data/read/msearch"; + public static final ActionType TYPE = new ActionType<>(NAME, MultiSearchResponse::new); + private static final Logger logger = LogManager.getLogger(TransportMultiSearchAction.class); private final int allocatedProcessors; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -46,7 +53,7 @@ public TransportMultiSearchAction( ActionFilters actionFilters, NodeClient client ) { - super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.allocatedProcessors = EsExecutors.allocatedProcessors(settings); @@ -63,7 +70,7 @@ public TransportMultiSearchAction( LongSupplier relativeTimeProvider, NodeClient client ) { - super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.allocatedProcessors = allocatedProcessors; @@ -155,6 +162,9 @@ public void onResponse(final SearchResponse 
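The TransportClearScrollAction, TransportClosePointInTimeAction and TransportMultiSearchAction hunks above all apply the refactor that recurs throughout this diff: the standalone FooAction singleton classes are deleted, the ActionType constant moves onto the transport action, and request builders accept a plain ActionType. Sketched with illustrative names (FooRequest, FooResponse and TransportFooAction are not real classes):

// Before: a dedicated singleton class per action.
//   client.execute(FooAction.INSTANCE, request, listener);
// After: the ActionType lives on the transport action itself.
public class TransportFooAction extends HandledTransportAction<FooRequest, FooResponse> {
    public static final String NAME = "indices:data/read/foo";
    public static final ActionType<FooResponse> TYPE = new ActionType<>(NAME, FooResponse::new);

    // the constructor passes TYPE.name() to super(...), as in the hunks above
}
// Call sites become:
//   client.execute(TransportFooAction.TYPE, request, listener);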
searchResponse) { @Override public void onFailure(final Exception e) { + if (ExceptionsHelper.status(e).getStatus() >= 500 && ExceptionsHelper.isNodeOrShardUnavailableTypeException(e) == false) { + logger.warn("TransportMultiSearchAction failure", e); + } handleResponse(request.responseSlot, new MultiSearchResponse.Item(null, e)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index ae3c735e079e9..2bc642e6c0907 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -8,8 +8,11 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.ActionFilters; @@ -28,6 +31,7 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; @@ -47,7 +51,14 @@ import java.util.function.BiFunction; public class TransportOpenPointInTimeAction extends HandledTransportAction<OpenPointInTimeRequest, OpenPointInTimeResponse> { + + private static final Logger logger = LogManager.getLogger(TransportOpenPointInTimeAction.class); + public static final String OPEN_SHARD_READER_CONTEXT_NAME = "indices:data/read/open_reader_context"; + public static final ActionType<OpenPointInTimeResponse> TYPE = new ActionType<>( + "indices:data/read/open_point_in_time", + OpenPointInTimeResponse::new + ); private final TransportSearchAction transportSearchAction; private final SearchTransportService searchTransportService; @@ -62,13 +73,7 @@ public TransportOpenPointInTimeAction( TransportSearchAction transportSearchAction, SearchTransportService searchTransportService ) { - super( - OpenPointInTimeAction.NAME, - transportService, - actionFilters, - OpenPointInTimeRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(TYPE.name(), transportService, actionFilters, OpenPointInTimeRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.transportService = transportService; this.transportSearchAction = transportSearchAction; this.searchService = searchService; @@ -93,7 +98,8 @@ protected void doExecute(Task task, OpenPointInTimeRequest request, ActionListen .indicesOptions(request.indicesOptions()) .preference(request.preference()) .routing(request.routing()) - .allowPartialSearchResults(false); + .allowPartialSearchResults(false) + .source(new SearchSourceBuilder().query(request.indexFilter())); searchRequest.setMaxConcurrentShardRequests(request.maxConcurrentShardRequests()); searchRequest.setCcsMinimizeRoundtrips(false); transportSearchAction.executeRequest((SearchTask) task, searchRequest, listener.map(r -> { @@ -125,6 +131,63 @@ public SearchPhase newSearchPhase( boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters clusters + ) { + if (SearchService.canRewriteToMatchNone(searchRequest.source())) { + return new 
CanMatchPreFilterSearchPhase( + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION), + searchRequest, + shardIterators, + timeProvider, + task, + false, + searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), + listener.delegateFailureAndWrap( + (searchResponseActionListener, searchShardIterators) -> openPointInTimePhase( + task, + searchRequest, + executor, + searchShardIterators, + timeProvider, + connectionLookup, + clusterState, + aliasFilter, + concreteIndexBoosts, + clusters + ).start() + ) + ); + } else { + return openPointInTimePhase( + task, + searchRequest, + executor, + shardIterators, + timeProvider, + connectionLookup, + clusterState, + aliasFilter, + concreteIndexBoosts, + clusters + ); + } + } + + SearchPhase openPointInTimePhase( + SearchTask task, + SearchRequest searchRequest, + Executor executor, + GroupShardsIterator<SearchShardIterator> shardIterators, + TransportSearchAction.SearchTimeProvider timeProvider, + BiFunction<String, String, Transport.Connection> connectionLookup, + ClusterState clusterState, + Map<String, AliasFilter> aliasFilter, + Map<String, Float> concreteIndexBoosts, + SearchResponse.Clusters clusters ) { assert searchRequest.getMaxConcurrentShardRequests() == pitRequest.maxConcurrentShardRequests() : searchRequest.getMaxConcurrentShardRequests() + " != " + pitRequest.maxConcurrentShardRequests(); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 5030bd875a0f6..38d448a8a9372 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -10,11 +10,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; @@ -109,6 +112,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> { + public static final String NAME = "indices:data/read/search"; + public static final ActionType<SearchResponse> TYPE = new ActionType<>(NAME, SearchResponse::new); private static final Logger logger = LogManager.getLogger(TransportSearchAction.class); private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(TransportSearchAction.class); public static final String FROZEN_INDICES_DEPRECATION_MESSAGE = "Searching frozen indices [{}] is deprecated." 
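/*
 * [Editor's sketch, not part of the patch] The hunk above moves the search action's name and
 * ActionType constant onto TransportSearchAction itself, so callers that previously referenced
 * the removed SearchAction.INSTANCE now go through TransportSearchAction.TYPE. A minimal caller,
 * assuming an already-wired Client instance and a hypothetical "logs" index:
 *
 *     client.execute(
 *         TransportSearchAction.TYPE,
 *         new SearchRequest("logs"),
 *         ActionListener.wrap(response -> logger.info("took {}", response.getTook()), e -> logger.warn("search failed", e))
 *     );
 */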
@@ -158,7 +163,7 @@ public TransportSearchAction( NamedWriteableRegistry namedWriteableRegistry, ExecutorSelector executorSelector ) { - super(SearchAction.NAME, transportService, actionFilters, SearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, SearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.circuitBreaker = circuitBreakerService.getBreaker(CircuitBreaker.REQUEST); this.searchPhaseController = searchPhaseController; @@ -278,7 +283,24 @@ public long buildTookInMillis() { @Override protected void doExecute(Task task, SearchRequest searchRequest, ActionListener<SearchResponse> listener) { - executeRequest((SearchTask) task, searchRequest, listener, AsyncSearchActionProvider::new); + ActionListener<SearchResponse> loggingListener = listener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { + // Deduplicate failures by exception message and index + ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); + for (ShardOperationFailedException f : groupedFailures) { + boolean causeHas500Status = false; + if (f.getCause() != null) { + causeHas500Status = ExceptionsHelper.status(f.getCause()).getStatus() >= 500; + } + if ((f.status().getStatus() >= 500 || causeHas500Status) + && ExceptionsHelper.isNodeOrShardUnavailableTypeException(f.getCause()) == false) { + logger.warn("TransportSearchAction shard failure (partial results response)", f); + } + } + } + l.onResponse(searchResponse); + }); + executeRequest((SearchTask) task, searchRequest, loggingListener, AsyncSearchActionProvider::new); } void executeRequest( @@ -703,7 +725,7 @@ Map createFinalResponse() { ); transportService.sendRequest( connection, - SearchShardsAction.NAME, + TransportSearchShardsAction.TYPE.name(), searchShardsRequest, TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(singleListener, SearchShardsResponse::new, responseExecutor) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index d097b10b7162d..0a2b496a5eb8a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -8,7 +8,12 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; @@ -22,7 +27,8 @@ import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId; public class TransportSearchScrollAction extends HandledTransportAction<SearchScrollRequest, SearchResponse> { - + public static final ActionType<SearchResponse> TYPE = new ActionType<>("indices:data/read/scroll", SearchResponse::new); + private static final Logger logger = LogManager.getLogger(TransportSearchScrollAction.class); private final ClusterService clusterService; private final SearchTransportService searchTransportService; @@ -33,13 +39,26 @@ public TransportSearchScrollAction( ActionFilters 
actionFilters, SearchTransportService searchTransportService ) { - super(SearchScrollAction.NAME, transportService, actionFilters, SearchScrollRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super(TYPE.name(), transportService, actionFilters, SearchScrollRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.clusterService = clusterService; this.searchTransportService = searchTransportService; } @Override protected void doExecute(Task task, SearchScrollRequest request, ActionListener<SearchResponse> listener) { + ActionListener<SearchResponse> loggingListener = listener.delegateFailureAndWrap((l, searchResponse) -> { + if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { + ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); + for (ShardOperationFailedException f : groupedFailures) { + Throwable cause = f.getCause() == null ? f : f.getCause(); + if (ExceptionsHelper.status(cause).getStatus() >= 500 + && ExceptionsHelper.isNodeOrShardUnavailableTypeException(cause) == false) { + logger.warn("TransportSearchScrollAction shard failure (partial results response)", f); + } + } + } + l.onResponse(searchResponse); + }); try { ParsedScrollId scrollId = parseScrollId(request.scrollId()); Runnable action = switch (scrollId.getType()) { @@ -50,7 +69,7 @@ protected void doExecute(Task task, SearchScrollRequest request, ActionListener< request, (SearchTask) task, scrollId, - listener + loggingListener ); case QUERY_AND_FETCH_TYPE -> // TODO can we get rid of this? new SearchScrollQueryAndFetchAsyncAction( @@ -60,7 +79,7 @@ protected void doExecute(Task task, SearchScrollRequest request, ActionListener< request, (SearchTask) task, scrollId, - listener + loggingListener ); default -> throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized"); }; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index 4c8ade4d78ead..0d1672c77cbed 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -41,6 +42,9 @@ * An internal search shards API performs the can_match phase and returns target shards of indices that might match a query. 
*/ public class TransportSearchShardsAction extends HandledTransportAction<SearchShardsRequest, SearchShardsResponse> { + + public static final String NAME = "indices:admin/search/search_shards"; + public static final ActionType<SearchShardsResponse> TYPE = new ActionType<>(NAME, SearchShardsResponse::new); private final TransportService transportService; private final TransportSearchAction transportSearchAction; private final SearchService searchService; @@ -61,7 +65,7 @@ public TransportSearchShardsAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - SearchShardsAction.NAME, + TYPE.name(), transportService, actionFilters, SearchShardsRequest::new, @@ -79,7 +83,6 @@ public TransportSearchShardsAction( @Override protected void doExecute(Task task, SearchShardsRequest searchShardsRequest, ActionListener<SearchShardsResponse> listener) { - assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); final long relativeStartNanos = System.nanoTime(); SearchRequest original = new SearchRequest(searchShardsRequest.indices()).indicesOptions(searchShardsRequest.indicesOptions()) .routing(searchShardsRequest.routing()) diff --git a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java index a4836ca322035..c746bc9acf2a1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java @@ -28,6 +28,7 @@ public ChannelActionListener(TransportChannel channel) { @Override public void onResponse(Response response) { + response.incRef(); // acquire reference that will be released by channel.sendResponse below ActionListener.run(this, l -> l.channel.sendResponse(response)); } diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index b360443a396d1..721983b6af0e7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -30,10 +30,6 @@ public class PlainActionFuture<T> implements ActionFuture<T>, ActionListener<T> { - public static <T> PlainActionFuture<T> newFuture() { - return new PlainActionFuture<>(); - } - @Override public void onResponse(T result) { set(result); @@ -442,13 +438,13 @@ private static RuntimeException unwrapEsException(ElasticsearchException esEx) { } public static <T, E extends Exception> T get(CheckedConsumer<PlainActionFuture<T>, E> e) throws E { - PlainActionFuture<T> fut = newFuture(); + PlainActionFuture<T> fut = new PlainActionFuture<>(); e.accept(fut); return fut.actionGet(); } public static <T, E extends Exception> T get(CheckedConsumer<PlainActionFuture<T>, E> e, long timeout, TimeUnit unit) throws E { - PlainActionFuture<T> fut = newFuture(); + PlainActionFuture<T> fut = new PlainActionFuture<>(); e.accept(fut); return fut.actionGet(timeout, unit); } diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java index f4d580a44621f..ff5c3115e569b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java @@ -10,13 +10,13 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Releasable; import java.util.Objects; import 
java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; /** * A mechanism to complete a listener on the completion of some (dynamic) collection of other actions. Basic usage is as follows: @@ -176,7 +176,7 @@ public String toString() { * It is also invalid to complete the returned listener more than once. Doing so will trip an assertion if assertions are enabled, but * will be ignored otherwise. */ - public ActionListener<Response> acquire(Consumer<Response> consumer) { + public ActionListener<Response> acquire(CheckedConsumer<Response, Exception> consumer) { final var ref = refs.acquire(); final var consumerRef = new AtomicReference<>(Objects.requireNonNull(consumer)); return new ActionListener<>() { @@ -187,10 +187,12 @@ public void onResponse(Response response) { if (acquiredConsumer == null) { assert false : "already closed"; } else { - acquiredConsumer.accept(response); + try { + acquiredConsumer.accept(response); + } catch (Exception e) { + addException(e); + } } - } catch (Exception e) { - addException(e); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java index d05f698749a3b..8dcc801f10c30 100644 --- a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java +++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java @@ -63,7 +63,6 @@ public final class RefCountingRunnable implements Releasable { private static final Logger logger = LogManager.getLogger(RefCountingRunnable.class); - static final String ALREADY_CLOSED_MESSAGE = "already closed, cannot acquire or release any further refs"; private final RefCounted refCounted; @@ -86,14 +85,11 @@ public RefCountingRunnable(Runnable delegate) { * will be ignored otherwise. This deviates from the contract of {@link java.io.Closeable}. */ public Releasable acquire() { - if (refCounted.tryIncRef()) { - // All refs are considered equal so there's no real need to allocate a new object here, although note that this deviates - // (subtly) from the docs for Closeable#close() which indicate that it should be idempotent. But only if assertions are - // disabled, and if assertions are enabled then we are asserting that we never double-close these things anyway. - return Releasables.assertOnce(this); - } - assert false : ALREADY_CLOSED_MESSAGE; - throw new IllegalStateException(ALREADY_CLOSED_MESSAGE); + refCounted.mustIncRef(); + // All refs are considered equal so there's no real need to allocate a new object here, although note that this deviates (subtly) + // from the docs for Closeable#close() which indicate that it should be idempotent. But only if assertions are disabled, and if + // assertions are enabled then we are asserting that we never double-close these things anyway. 
+ return Releasables.assertOnce(this); } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 194b4852c16d7..19c7561ccdb15 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -228,7 +228,7 @@ protected String[] resolveConcreteIndexNames(ClusterState clusterState, Request @Override protected void doExecute(Task task, Request request, ActionListener<Response> listener) { // workaround for https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can - request.incRef(); + request.mustIncRef(); executor.execute(ActionRunnable.wrapReleasing(listener, request::decRef, l -> doExecuteForked(task, request, listener))); } @@ -474,7 +474,7 @@ class NodeRequest extends TransportRequest implements IndicesRequest { } NodeRequest(Request indicesLevelRequest, List<ShardRouting> shards, String nodeId) { - indicesLevelRequest.incRef(); + indicesLevelRequest.mustIncRef(); this.indicesLevelRequest = indicesLevelRequest; this.shards = shards; this.nodeId = nodeId; diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 71964d737e8d2..b771f6cc512d1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -169,7 +169,7 @@ protected void doExecute(Task task, final Request request, ActionListener li protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException; protected void asyncShardOperation(Request request, ShardId shardId, ActionListener<Response> listener) throws IOException { - getExecutor(request, shardId).execute(ActionRunnable.supply(listener, () -> shardOperation(request, shardId))); + getExecutor(request, shardId).execute(ActionRunnable.supplyAndDecRef(listener, () -> shardOperation(request, shardId))); } protected abstract Writeable.Reader<Response> getResponseReader(); diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index af87fd8cddb4b..e94a619c7785e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -290,7 +290,7 @@ public void writeTo(StreamOutput out) throws IOException { protected NodeTaskRequest(TasksRequest tasksRequest) { super(); - tasksRequest.incRef(); + tasksRequest.mustIncRef(); this.tasksRequest = tasksRequest; } @@ -356,14 +356,6 @@ private class NodeTasksResponse extends TransportResponse { this.exceptions = exceptions; } - public String getNodeId() { - return nodeId; - } - - public List<TaskOperationFailure> getExceptions() { - return exceptions; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); diff --git a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java index 0228dc7cc61ea..f75997d92b678 100644 --- 
a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java @@ -110,7 +110,7 @@ public Task exe transportAction(action), request, localConnection, - new SafelyWrappedActionListener<>(listener) + ActionListener.assertOnce(listener) ); } @@ -148,27 +148,4 @@ public NamedWriteableRegistry getNamedWriteableRegistry() { return namedWriteableRegistry; } - private record SafelyWrappedActionListener<Response>(ActionListener<Response> listener) implements ActionListener<Response> { - - @Override - public void onResponse(Response response) { - try { - listener.onResponse(response); - } catch (Exception e) { - assert false : new AssertionError("callback must handle its own exceptions", e); - throw e; - } - } - - @Override - public void onFailure(Exception e) { - try { - listener.onFailure(e); - } catch (Exception ex) { - ex.addSuppressed(e); - assert false : new AssertionError("callback must handle its own exceptions", ex); - throw ex; - } - } - } } diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index 98bad4d3dd74c..82d0f2fb85847 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -247,10 +247,10 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainRequestBuilder; import org.elasticsearch.action.explain.ExplainResponse; +import org.elasticsearch.action.explain.TransportExplainAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder; @@ -280,21 +280,21 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; import org.elasticsearch.action.ingest.SimulatePipelineResponse; -import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollRequestBuilder; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchRequestBuilder; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchScrollRequestBuilder; +import org.elasticsearch.action.search.TransportClearScrollAction; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchScrollAction; import 
org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; @@ -320,11 +320,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; public abstract class AbstractClient implements Client { @@ -361,7 +364,7 @@ public final Ac ActionType<Response> action, Request request ) { - PlainActionFuture<Response> actionFuture = PlainActionFuture.newFuture(); + PlainActionFuture<Response> actionFuture = new RefCountedFuture<>(); execute(action, request, actionFuture); return actionFuture; } @@ -506,47 +509,47 @@ public MultiGetRequestBuilder prepareMultiGet() { @Override public ActionFuture<SearchResponse> search(final SearchRequest request) { - return execute(SearchAction.INSTANCE, request); + return execute(TransportSearchAction.TYPE, request); } @Override public void search(final SearchRequest request, final ActionListener<SearchResponse> listener) { - execute(SearchAction.INSTANCE, request, listener); + execute(TransportSearchAction.TYPE, request, listener); } @Override public SearchRequestBuilder prepareSearch(String... indices) { - return new SearchRequestBuilder(this, SearchAction.INSTANCE).setIndices(indices); + return new SearchRequestBuilder(this, TransportSearchAction.TYPE).setIndices(indices); } @Override public ActionFuture<SearchResponse> searchScroll(final SearchScrollRequest request) { - return execute(SearchScrollAction.INSTANCE, request); + return execute(TransportSearchScrollAction.TYPE, request); } @Override public void searchScroll(final SearchScrollRequest request, final ActionListener<SearchResponse> listener) { - execute(SearchScrollAction.INSTANCE, request, listener); + execute(TransportSearchScrollAction.TYPE, request, listener); } @Override public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) { - return new SearchScrollRequestBuilder(this, SearchScrollAction.INSTANCE, scrollId); + return new SearchScrollRequestBuilder(this, TransportSearchScrollAction.TYPE, scrollId); } @Override public ActionFuture<MultiSearchResponse> multiSearch(MultiSearchRequest request) { - return execute(MultiSearchAction.INSTANCE, request); + return execute(TransportMultiSearchAction.TYPE, request); } @Override public void multiSearch(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) { - execute(MultiSearchAction.INSTANCE, request, listener); + execute(TransportMultiSearchAction.TYPE, request, listener); } @Override public MultiSearchRequestBuilder prepareMultiSearch() { - return new MultiSearchRequestBuilder(this, MultiSearchAction.INSTANCE); + return new MultiSearchRequestBuilder(this, TransportMultiSearchAction.TYPE); } @Override @@ -586,32 +589,32 @@ public MultiTermVectorsRequestBuilder prepareMultiTermVectors() { @Override public ExplainRequestBuilder prepareExplain(String index, String id) { - return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, id); + return new ExplainRequestBuilder(this, TransportExplainAction.TYPE, index, id); } @Override public ActionFuture<ExplainResponse> explain(ExplainRequest request) { - return execute(ExplainAction.INSTANCE, request); + return execute(TransportExplainAction.TYPE, request); } @Override public void explain(ExplainRequest 
request, ActionListener<ExplainResponse> listener) { - execute(ExplainAction.INSTANCE, request, listener); + execute(TransportExplainAction.TYPE, request, listener); } @Override public void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener) { - execute(ClearScrollAction.INSTANCE, request, listener); + execute(TransportClearScrollAction.TYPE, request, listener); } @Override public ActionFuture<ClearScrollResponse> clearScroll(ClearScrollRequest request) { - return execute(ClearScrollAction.INSTANCE, request); + return execute(TransportClearScrollAction.TYPE, request); } @Override public ClearScrollRequestBuilder prepareClearScroll() { - return new ClearScrollRequestBuilder(this, ClearScrollAction.INSTANCE); + return new ClearScrollRequestBuilder(this, TransportClearScrollAction.TYPE); } @Override @@ -1598,4 +1601,34 @@ protected void } }; } + + /** + * Same as {@link PlainActionFuture} but for use with {@link RefCounted} result types. Unlike {@code PlainActionFuture} this future + * acquires a reference to its result. This means that the result reference must be released by a call to {@link RefCounted#decRef()} + * on the result before it goes out of scope. + * @param <R> reference counted result type + */ + private static class RefCountedFuture<R extends RefCounted> extends PlainActionFuture<R> { + + @Override + public final void onResponse(R result) { + result.mustIncRef(); + if (set(result) == false) { + result.decRef(); + } + } + + private final AtomicBoolean getCalled = new AtomicBoolean(false); + + @Override + public R get() throws InterruptedException, ExecutionException { + final boolean firstCall = getCalled.compareAndSet(false, true); + if (firstCall == false) { + final IllegalStateException ise = new IllegalStateException("must only call .get() once per instance to avoid leaks"); + assert false : ise; + throw ise; + } + return super.get(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index b400269265224..0392ca2e6581a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -92,26 +92,6 @@ public boolean indexRoutingTableChanged(String index) { return true; } - /** - * Returns the indices created in this event - */ - public List<String> indicesCreated() { - if (metadataChanged() == false) { - return Collections.emptyList(); - } - List<String> created = null; - for (Map.Entry<String, IndexMetadata> cursor : state.metadata().indices().entrySet()) { - String index = cursor.getKey(); - if (previousState.metadata().hasIndex(index) == false) { - if (created == null) { - created = new ArrayList<>(); - } - created.add(index); - } - } - return created == null ? 
Collections.emptyList() : created; - } - /** * Returns the indices deleted in this event */ diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java index ae68bfafdd6c5..95cc53376af59 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java @@ -12,12 +12,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; @@ -50,24 +52,24 @@ public ClusterFeatures(Map<String, Set<String>> nodeFeatures) { .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> Set.copyOf(e.getValue()))); } - private Set<String> calculateAllNodeFeatures() { + public static Set<String> calculateAllNodeFeatures(Collection<Set<String>> nodeFeatures) { if (nodeFeatures.isEmpty()) { return Set.of(); } Set<String> allNodeFeatures = null; - for (Set<String> featureSet : nodeFeatures.values()) { + for (Set<String> featureSet : nodeFeatures) { if (allNodeFeatures == null) { allNodeFeatures = new HashSet<>(featureSet); } else { allNodeFeatures.retainAll(featureSet); } } - return Set.copyOf(allNodeFeatures); + return allNodeFeatures; } /** - * Returns the features reported by each node in the cluster. + * The features reported by each node in the cluster. * <p>
    * NOTE: This should not be used directly. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. @@ -76,17 +78,28 @@ public Map<String, Set<String>> nodeFeatures() { return nodeFeatures; } + /** + * The features in all nodes in the cluster. + * <p>
    + * NOTE: This should not be used directly. + * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. + */ + public Set<String> allNodeFeatures() { + if (allNodeFeatures == null) { + allNodeFeatures = Set.copyOf(calculateAllNodeFeatures(nodeFeatures.values())); + } + return allNodeFeatures; + } + /** * {@code true} if {@code feature} is present on all nodes in the cluster. * <p>
    * NOTE: This should not be used directly, as it does not read historical features. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. */ + @SuppressForbidden(reason = "directly reading cluster features") public boolean clusterHasFeature(NodeFeature feature) { - if (allNodeFeatures == null) { - allNodeFeatures = calculateAllNodeFeatures(); - } - return allNodeFeatures.contains(feature.id()); + return allNodeFeatures().contains(feature.id()); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index ee94008372dab..5f682804a5b88 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.ComponentTemplateMetadata; @@ -65,6 +64,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.health.metadata.HealthMetadataService; import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor; @@ -77,6 +77,7 @@ import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskResultsService; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.upgrades.FeatureMigrationResults; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -126,7 +127,8 @@ public ClusterModule( SnapshotsInfoService snapshotsInfoService, ThreadPool threadPool, SystemIndices systemIndices, - WriteLoadForecaster writeLoadForecaster + WriteLoadForecaster writeLoadForecaster, + TelemetryProvider telemetryProvider ) { this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); @@ -138,7 +140,8 @@ public ClusterModule( clusterPlugins, clusterService, this::reconcile, - writeLoadForecaster + writeLoadForecaster, + telemetryProvider ); this.clusterService = clusterService; this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices); @@ -373,6 +376,7 @@ private static void addAllocationDecider(Map, AllocationDecider> decide } } + @UpdateForV9 // in v9 there is only one allocator private static ShardsAllocator createShardsAllocator( Settings settings, ClusterSettings clusterSettings, @@ -380,7 +384,8 @@ private static ShardsAllocator createShardsAllocator( List clusterPlugins, ClusterService clusterService, DesiredBalanceReconcilerAction reconciler, - WriteLoadForecaster writeLoadForecaster + WriteLoadForecaster writeLoadForecaster, + TelemetryProvider telemetryProvider ) { Map> allocators = new HashMap<>(); allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster)); @@ -391,7 +396,8 @@ private static ShardsAllocator createShardsAllocator( new BalancedShardsAllocator(clusterSettings, writeLoadForecaster), threadPool, clusterService, - reconciler + reconciler, + telemetryProvider ) ); @@ -404,7 
+410,6 @@ private static ShardsAllocator createShardsAllocator( }); } String allocatorName = SHARDS_ALLOCATOR_TYPE_SETTING.get(settings); - assert Version.CURRENT.major == Version.V_7_17_0.major + 1; // in v9 there is only one allocator Supplier allocatorSupplier = allocators.get(allocatorName); if (allocatorSupplier == null) { throw new IllegalArgumentException("Unknown ShardsAllocator [" + allocatorName + "]"); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 565e43455d8d7..e861ff3ecf27e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -818,12 +818,6 @@ public DiscoveryNodes nodes() { return nodes; } - // Deprecate to keep downstream projects compiling - @Deprecated(forRemoval = true) - public Builder putTransportVersion(String nodeId, TransportVersion transportVersion) { - return putCompatibilityVersions(nodeId, transportVersion, Map.of()); - } - public Builder putCompatibilityVersions( String nodeId, TransportVersion transportVersion, @@ -840,12 +834,6 @@ public Builder putCompatibilityVersions(String nodeId, CompatibilityVersions ver return this; } - // Deprecate to keep downstream projects compiling - @Deprecated(forRemoval = true) - public Builder compatibilityVersions(Map versions) { - return nodeIdsToCompatibilityVersions(versions); - } - public Builder nodeIdsToCompatibilityVersions(Map versions) { versions.forEach((key, value) -> Objects.requireNonNull(value, key)); // remove all versions not present in the new map @@ -1047,7 +1035,7 @@ private static TransportVersion inferTransportVersion(DiscoveryNode node) { TransportVersion tv; if (node.getVersion().before(Version.V_8_8_0)) { // 1-to-1 mapping between Version and TransportVersion - tv = TransportVersion.fromId(node.getVersion().id); + tv = TransportVersion.fromId(node.getPre811VersionId().getAsInt()); } else { // use the lowest value it could be for now tv = INFERRED_TRANSPORT_VERSION; diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index bd5fbe189ead5..1744bcc91b834 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -284,8 +285,9 @@ public void onFailure(Exception e) { private void fetchNodeStats() { final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true"); + nodesStatsRequest.setIncludeShardsStats(false); nodesStatsRequest.clear(); - nodesStatsRequest.addMetric(NodesStatsRequest.Metric.FS.metricName()); + nodesStatsRequest.addMetric(NodesStatsRequestParameters.Metric.FS.metricName()); nodesStatsRequest.timeout(fetchTimeout); client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.releaseAfter(new ActionListener<>() { 
@Override diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 0f046d4ab94f1..1a079d03405d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -221,11 +222,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Iterator toXContentChunked(ToXContent.Params ignored) { - return Iterators.concat( - Iterators.single((builder, params) -> builder.startArray("snapshots")), - asStream().iterator(), - Iterators.single((builder, params) -> builder.endArray()) - ); + return Iterators.concat(ChunkedToXContentHelper.startArray("snapshots"), asStream().iterator(), ChunkedToXContentHelper.endArray()); } @Override @@ -346,14 +343,20 @@ private static boolean assertConsistentEntries(Map entries) { assert entry.repository().equals(repository) : "mismatched repository " + entry + " tracked under " + repository; for (Map.Entry shard : entry.shardsByRepoShardId().entrySet()) { final RepositoryShardId sid = shard.getKey(); + final ShardSnapshotStatus shardSnapshotStatus = shard.getValue(); assert assertShardStateConsistent( entriesForRepository, assignedShards, queuedShards, sid.indexName(), sid.shardId(), - shard.getValue() + shardSnapshotStatus ); + + assert entry.state() != State.ABORTED + || shardSnapshotStatus.state == ShardState.ABORTED + || shardSnapshotStatus.state().completed() + : sid + " is in state " + shardSnapshotStatus.state() + " in aborted snapshot " + entry.snapshot; } } // make sure in-flight-shard-states can be built cleanly for the entries without tripping assertions diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 0f84ecab5f8b2..51fca588699e2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -60,7 +61,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -283,13 +283,13 @@ private static class ShardFailedTransportHandler implements TransportRequestHand private static final String TASK_SOURCE = "shard-failed"; @Override - public void messageReceived(FailedShardEntry request, TransportChannel channel, Task task) throws Exception { + public void messageReceived(FailedShardEntry request, TransportChannel channel, Task task) { logger.debug(() -> format("%s received shard failed for [%s]", request.getShardId(), request), request.failure); 
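/*
 * [Editor's note, illustrative only] The import swap above (java.util.Locale out,
 * org.elasticsearch.common.Strings in) supports the hunks below, where Strings.format
 * replaces String.format(Locale.ROOT, ...). Strings.format is understood to be the
 * project's locale-independent formatting shorthand, so the two calls below should
 * produce the same text; the count 3 is a made-up example value:
 *
 *     String viaHelper = Strings.format("[%d] unassigned shards after failing shards", 3);
 *     String viaJdk = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", 3);
 *     assert viaHelper.equals(viaJdk);
 */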
- var update = new FailedShardUpdateTask( - request, - new ChannelActionListener<>(channel).map(ignored -> TransportResponse.Empty.INSTANCE) + taskQueue.submitTask( + TASK_SOURCE, + new FailedShardUpdateTask(request, new ChannelActionListener<>(channel).map(ignored -> TransportResponse.Empty.INSTANCE)), + null ); - taskQueue.submitTask(TASK_SOURCE, update, null); } } @@ -423,7 +423,7 @@ public void clusterStatePublished(ClusterState newClusterState) { // The reroute called after failing some shards will not assign any shard back to the node on which it failed. If there were // no other options for a failed shard then it is left unassigned. However, absent other options it's better to try and // assign it again, even if that means putting it back on the node on which it previously failed: - final String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); + final String reason = Strings.format("[%d] unassigned shards after failing shards", numberOfUnassignedShards); logger.trace("{}, scheduling a reroute", reason); rerouteService.reroute( reason, @@ -493,16 +493,15 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - List components = new ArrayList<>(6); - components.add("shard id [" + shardId + "]"); - components.add("allocation id [" + allocationId + "]"); - components.add("primary term [" + primaryTerm + "]"); - components.add("message [" + message + "]"); - components.add("markAsStale [" + markAsStale + "]"); - if (failure != null) { - components.add("failure [" + ExceptionsHelper.stackTrace(failure) + "]"); - } - return String.join(", ", components); + return Strings.format( + "FailedShardEntry{shardId [%s], allocationId [%s], primary term [%d], message [%s], markAsStale [%b], failure [%s]}", + shardId, + allocationId, + primaryTerm, + message, + markAsStale, + failure != null ? 
ExceptionsHelper.stackTrace(failure) : null + ); } @Override @@ -785,8 +784,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return String.format( - Locale.ROOT, + return Strings.format( "StartedShardEntry{shardId [%s], allocationId [%s], primary term [%d], message [%s]}", shardId, allocationId, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index 113e8b0a7f388..402e170f1ea53 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -183,7 +183,7 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { final String reason; if (electionWon == false) { reason = "failed election"; - } else if (startJoinRequest.getSourceNode().equals(localNode)) { + } else if (startJoinRequest.getMasterCandidateNode().equals(localNode)) { reason = "bumping term"; } else { reason = "standing down as leader"; @@ -200,7 +200,13 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { joinVotes = new VoteCollection(); publishVotes = new VoteCollection(); - return new Join(localNode, startJoinRequest.getSourceNode(), getCurrentTerm(), getLastAcceptedTerm(), getLastAcceptedVersion()); + return new Join( + localNode, + startJoinRequest.getMasterCandidateNode(), + getCurrentTerm(), + getLastAcceptedTerm(), + getLastAcceptedVersion() + ); } /** @@ -211,12 +217,12 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { * @throws CoordinationStateRejectedException if the arguments were incompatible with the current state of this object. 
*/ public boolean handleJoin(Join join) { - assert join.targetMatches(localNode) : "handling join " + join + " for the wrong node " + localNode; + assert join.masterCandidateMatches(localNode) : "handling join " + join + " for the wrong node " + localNode; - if (join.getTerm() != getCurrentTerm()) { - logger.debug("handleJoin: ignored join due to term mismatch (expected: [{}], actual: [{}])", getCurrentTerm(), join.getTerm()); + if (join.term() != getCurrentTerm()) { + logger.debug("handleJoin: ignored join due to term mismatch (expected: [{}], actual: [{}])", getCurrentTerm(), join.term()); throw new CoordinationStateRejectedException( - "incoming term " + join.getTerm() + " does not match current term " + getCurrentTerm() + "incoming term " + join.term() + " does not match current term " + getCurrentTerm() ); } @@ -226,30 +232,30 @@ public boolean handleJoin(Join join) { } final long lastAcceptedTerm = getLastAcceptedTerm(); - if (join.getLastAcceptedTerm() > lastAcceptedTerm) { + if (join.lastAcceptedTerm() > lastAcceptedTerm) { logger.debug( "handleJoin: ignored join as joiner has a better last accepted term (expected: <=[{}], actual: [{}])", lastAcceptedTerm, - join.getLastAcceptedTerm() + join.lastAcceptedTerm() ); throw new CoordinationStateRejectedException( "incoming last accepted term " - + join.getLastAcceptedTerm() + + join.lastAcceptedTerm() + " of join higher than current last accepted term " + lastAcceptedTerm ); } - if (join.getLastAcceptedTerm() == lastAcceptedTerm && join.getLastAcceptedVersion() > getLastAcceptedVersion()) { + if (join.lastAcceptedTerm() == lastAcceptedTerm && join.lastAcceptedVersion() > getLastAcceptedVersion()) { logger.debug( "handleJoin: ignored join as joiner has a better last accepted version (expected: <=[{}], actual: [{}]) in term {}", getLastAcceptedVersion(), - join.getLastAcceptedVersion(), + join.lastAcceptedVersion(), lastAcceptedTerm ); throw new CoordinationStateRejectedException( "incoming last accepted version " - + join.getLastAcceptedVersion() + + join.lastAcceptedVersion() + " of join higher than current last accepted version " + getLastAcceptedVersion() + " in term " @@ -274,7 +280,7 @@ public boolean handleJoin(Join join) { logger.debug( "handleJoin: added join {} from [{}] for election, electionWon={} lastAcceptedTerm={} lastAcceptedVersion={}", join, - join.getSourceNode(), + join.votingNode(), electionWon, lastAcceptedTerm, getLastAcceptedVersion() @@ -592,7 +598,7 @@ public boolean addVote(DiscoveryNode sourceNode) { } public boolean addJoinVote(Join join) { - final boolean added = addVote(join.getSourceNode()); + final boolean added = addVote(join.votingNode()); if (added) { joins.add(join); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 08e31e11ae256..3da890b37ade8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -13,7 +13,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.internal.Client; @@ 
-63,6 +63,7 @@ import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.discovery.SeedHostsResolver; import org.elasticsearch.discovery.TransportAddressConnector; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; @@ -212,7 +213,7 @@ public Coordinator( LeaderHeartbeatService leaderHeartbeatService, PreVoteCollector.Factory preVoteCollectorFactory, CompatibilityVersions compatibilityVersions, - Set features + FeatureService featureService ) { this.settings = settings; this.transportService = transportService; @@ -238,7 +239,7 @@ public Coordinator( reconfigurator::maybeReconfigureAfterNewMasterIsElected, this::getLatestStoredStateAfterWinningAnElection, compatibilityVersions, - features + featureService ); this.joinValidationService = new JoinValidationService( settings, @@ -488,7 +489,7 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { } private static Optional joinWithDestination(Optional lastJoin, DiscoveryNode leader, long term) { - if (lastJoin.isPresent() && lastJoin.get().targetMatches(leader) && lastJoin.get().getTerm() == term) { + if (lastJoin.isPresent() && lastJoin.get().masterCandidateMatches(leader) && lastJoin.get().term() == term) { return lastJoin; } @@ -565,6 +566,10 @@ public void onFailure(Exception e) { }); } + /** + * Attempts to abdicate master position to a new master-eligible node in the cluster. + * Broadcasts {@link StartJoinRequest} for {@param newMaster} to each member of the cluster. + */ private void abdicateTo(DiscoveryNode newMaster) { assert Thread.holdsLock(mutex); assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode; @@ -594,7 +599,7 @@ private Optional ensureTermAtLeast(DiscoveryNode sourceNode, long targetTe private Join joinLeaderInTerm(StartJoinRequest startJoinRequest) { synchronized (mutex) { - logger.debug("joinLeaderInTerm: for [{}] with term {}", startJoinRequest.getSourceNode(), startJoinRequest.getTerm()); + logger.debug("joinLeaderInTerm: for [{}] with term {}", startJoinRequest.getMasterCandidateNode(), startJoinRequest.getTerm()); final Join join = coordinationState.get().handleStartJoin(startJoinRequest); lastJoin = Optional.of(join); peerFinder.setCurrentTerm(getCurrentTerm()); @@ -629,21 +634,11 @@ private void handleJoinRequest(JoinRequest joinRequest, ActionListener joi transportService.connectToNode(joinRequest.getSourceNode(), new ActionListener<>() { @Override public void onResponse(Releasable response) { - boolean retainConnection = false; - try { - validateJoinRequest( - joinRequest, - ActionListener.runBefore(joinListener, () -> Releasables.close(response)) - .delegateFailure((l, ignored) -> processJoinRequest(joinRequest, l)) - ); - retainConnection = true; - } catch (Exception e) { - joinListener.onFailure(e); - } finally { - if (retainConnection == false) { - Releasables.close(response); - } - } + validateJoinRequest( + joinRequest, + ActionListener.runBefore(joinListener, () -> Releasables.close(response)) + .delegateFailure((l, ignored) -> processJoinRequest(joinRequest, l)) + ); } @Override @@ -682,48 +677,39 @@ private void validateJoinRequest(JoinRequest joinRequest, ActionListener v // - if we're already master that it can make sense of the current cluster state. 
// - we have a healthy PING channel to the node - final ClusterState stateForJoinValidation = getStateForJoinValidationService(); - final ListenableActionFuture validateStateListener = new ListenableActionFuture<>(); - if (stateForJoinValidation != null) { - assert stateForJoinValidation.nodes().isLocalNodeElectedMaster(); - onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation)); - if (stateForJoinValidation.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { - // We do this in a couple of places including the cluster update thread. This one here is really just best effort to ensure - // we fail as fast as possible. - NodeJoinExecutor.ensureVersionBarrier( - joinRequest.getSourceNode().getVersion(), - stateForJoinValidation.getNodes().getMinNodeVersion() - ); - } - sendJoinValidate(joinRequest.getSourceNode(), validateStateListener); - } else { - sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.STATE, validateStateListener); - } + try (var listeners = new RefCountingListener(validateListener)) { + // The join will be rejected if any of these steps fail, but we wait them all to complete, particularly state validation, since + // the node will retry and we don't want lots of cluster states in flight. - sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.PING, new ActionListener<>() { - @Override - public void onResponse(Void ignored) { - validateStateListener.addListener(validateListener); - } + ActionListener.completeWith(listeners.acquire(), () -> { + final ClusterState stateForJoinValidation = getStateForJoinValidationService(); + if (stateForJoinValidation == null) { + return null; + } - @Override - public void onFailure(Exception e) { - // The join will be rejected, but we wait for the state validation to complete as well since the node will retry and we - // don't want lots of cluster states in flight. - validateStateListener.addListener(new ActionListener<>() { - @Override - public void onResponse(Void ignored) { - validateListener.onFailure(e); - } + assert stateForJoinValidation.nodes().isLocalNodeElectedMaster(); + onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation)); + if (stateForJoinValidation.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { + // We do this in a couple of places including the cluster update thread. This one here is really just best effort to + // ensure we fail as fast as possible. + NodeJoinExecutor.ensureVersionBarrier( + joinRequest.getSourceNode().getVersion(), + stateForJoinValidation.getNodes().getMinNodeVersion() + ); + } + sendJoinValidate(joinRequest.getSourceNode(), listeners.acquire()); + return null; + }); - @Override - public void onFailure(Exception e2) { - e2.addSuppressed(e); - validateListener.onFailure(e2); - } - }); + if (listeners.isFailing() == false) { + // We may not have sent a state for validation, so just ping both channel types. 
+ sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.PING, listeners.acquire()); + sendJoinPing(joinRequest.getSourceNode(), TransportRequestOptions.Type.STATE, listeners.acquire()); } - }); + } catch (Exception e) { + logger.error("unexpected exception in validateJoinRequest", e); + assert false : e; + } } private void sendJoinValidate(DiscoveryNode discoveryNode, ActionListener listener) { @@ -775,7 +761,7 @@ private void processJoinRequest(JoinRequest joinRequest, ActionListener jo final CoordinationState coordState = coordinationState.get(); final boolean prevElectionWon = coordState.electionWon() - && optionalJoin.stream().allMatch(j -> j.getTerm() <= getCurrentTerm()); + && optionalJoin.stream().allMatch(j -> j.term() <= getCurrentTerm()); optionalJoin.ifPresent(this::handleJoin); joinAccumulator.handleJoinRequest( @@ -1394,7 +1380,7 @@ boolean missingJoinVoteFrom(DiscoveryNode node) { private void handleJoin(Join join) { synchronized (mutex) { - ensureTermAtLeast(getLocalNode(), join.getTerm()).ifPresent(this::handleJoin); + ensureTermAtLeast(getLocalNode(), join.term()).ifPresent(this::handleJoin); if (coordinationState.get().electionWon()) { // If we have already won the election then the actual join does not matter for election purposes, so swallow any exception @@ -2070,7 +2056,7 @@ private void cancelTimeoutHandlers() { } private void handleAssociatedJoin(Join join) { - if (join.getTerm() == getCurrentTerm() && missingJoinVoteFrom(join.getSourceNode())) { + if (join.term() == getCurrentTerm() && missingJoinVoteFrom(join.votingNode())) { logger.trace("handling {}", join); handleJoin(join); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java index aacbed61b095a..d1fe472278f12 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java @@ -16,110 +16,44 @@ /** * Triggered by a {@link StartJoinRequest}, instances of this class represent join votes, - * and have a source and target node. The source node is the node that provides the vote, - * and the target node is the node for which this vote is cast. A node will only cast - * a single vote per term, and this for a unique target node. The vote also carries - * information about the current state of the node that provided the vote, so that - * the receiver of the vote can determine if it has a more up-to-date state than the - * source node. + * and have a voting and master-candidate node. The voting node is the node that provides + * the vote, and the master-candidate node is the node for which this vote is cast. A join + * vote is cast to reform the cluster around a particular master-eligible node, to elect + * that node as the new master in a new term. + * + * A voting node will only cast a single vote per term. The vote includes information about + * the current state of the node casting the vote, so that the candidate for the vote can + * determine whether it has a more up-to-date state than the voting node. + * + * @param votingNode The node casting a vote for a master candidate. + * @param masterCandidateNode The master candidate node receiving the vote for election. 
+ * @param term The term for which the vote is cast; a voting node casts at most one vote per term.
+ * @param lastAcceptedTerm The term of the last cluster state accepted by the voting node.
+ * @param lastAcceptedVersion The version of the last cluster state accepted by the voting node.
 */
-public class Join implements Writeable {
-    private final DiscoveryNode sourceNode;
-    private final DiscoveryNode targetNode;
-    private final long term;
-    private final long lastAcceptedTerm;
-    private final long lastAcceptedVersion;
-
-    public Join(DiscoveryNode sourceNode, DiscoveryNode targetNode, long term, long lastAcceptedTerm, long lastAcceptedVersion) {
+public record Join(DiscoveryNode votingNode, DiscoveryNode masterCandidateNode, long term, long lastAcceptedTerm, long lastAcceptedVersion)
+    implements
+        Writeable {
+    public Join {
         assert term >= 0;
         assert lastAcceptedTerm >= 0;
         assert lastAcceptedVersion >= 0;
-
-        this.sourceNode = sourceNode;
-        this.targetNode = targetNode;
-        this.term = term;
-        this.lastAcceptedTerm = lastAcceptedTerm;
-        this.lastAcceptedVersion = lastAcceptedVersion;
     }
     public Join(StreamInput in) throws IOException {
-        sourceNode = new DiscoveryNode(in);
-        targetNode = new DiscoveryNode(in);
-        term = in.readLong();
-        lastAcceptedTerm = in.readLong();
-        lastAcceptedVersion = in.readLong();
+        this(new DiscoveryNode(in), new DiscoveryNode(in), in.readLong(), in.readLong(), in.readLong());
     }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        sourceNode.writeTo(out);
-        targetNode.writeTo(out);
+        votingNode.writeTo(out);
+        masterCandidateNode.writeTo(out);
         out.writeLong(term);
         out.writeLong(lastAcceptedTerm);
         out.writeLong(lastAcceptedVersion);
     }
-    public DiscoveryNode getSourceNode() {
-        return sourceNode;
-    }
-
-    public DiscoveryNode getTargetNode() {
-        return targetNode;
-    }
-
-    public boolean targetMatches(DiscoveryNode matchingNode) {
-        return targetNode.getId().equals(matchingNode.getId());
-    }
-
-    public long getLastAcceptedVersion() {
-        return lastAcceptedVersion;
-    }
-
-    public long getTerm() {
-        return term;
-    }
-
-    public long getLastAcceptedTerm() {
-        return lastAcceptedTerm;
-    }
-
-    @Override
-    public String toString() {
-        return "Join{"
-            + "term="
-            + term
-            + ", lastAcceptedTerm="
-            + lastAcceptedTerm
-            + ", lastAcceptedVersion="
-            + lastAcceptedVersion
-            + ", sourceNode="
-            + sourceNode
-            + ", targetNode="
-            + targetNode
-            + '}';
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        Join join = (Join) o;
-
-        if (sourceNode.equals(join.sourceNode) == false) return false;
-        if (targetNode.equals(join.targetNode) == false) return false;
-        if (lastAcceptedVersion != join.lastAcceptedVersion) return false;
-        if (term != join.term) return false;
-        return lastAcceptedTerm == join.lastAcceptedTerm;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = (int) (lastAcceptedVersion ^ (lastAcceptedVersion >>> 32));
-        result = 31 * result + sourceNode.hashCode();
-        result = 31 * result + targetNode.hashCode();
-        result = 31 * result + (int) (term ^ (term >>> 32));
-        result = 31 * result + (int) (lastAcceptedTerm ^ (lastAcceptedTerm >>> 32));
-        return result;
+    public boolean masterCandidateMatches(DiscoveryNode matchingNode) {
+        return masterCandidateNode.getId().equals(matchingNode.getId());
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
index e5dee6aeb67e2..d11d8ade2a036 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
@@
-33,6 +33,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; @@ -62,6 +63,12 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY; +/** + * Handler for cluster join commands. A master-eligible node running for election will + * send a {@link StartJoinRequest} to each voting node in the cluster. A node that becomes + * aware of a new term and master will send a {@link Join} request to the new master, to + * re-form the cluster around the new master node. + */ public class JoinHelper { private static final Logger logger = LogManager.getLogger(JoinHelper.class); @@ -100,12 +107,12 @@ public class JoinHelper { Function maybeReconfigureAfterMasterElection, ObjLongConsumer> latestStoredStateSupplier, CompatibilityVersions compatibilityVersions, - Set features + FeatureService featureService ) { this.joinTaskQueue = masterService.createTaskQueue( "node-join", Priority.URGENT, - new NodeJoinExecutor(allocationService, rerouteService, maybeReconfigureAfterMasterElection) + new NodeJoinExecutor(allocationService, rerouteService, featureService, maybeReconfigureAfterMasterElection) ); this.clusterApplier = clusterApplier; this.transportService = transportService; @@ -115,7 +122,7 @@ public class JoinHelper { this.joinReasonService = joinReasonService; this.latestStoredStateSupplier = latestStoredStateSupplier; this.compatibilityVersions = compatibilityVersions; - this.features = features; + this.features = featureService.getNodeFeatures().keySet(); transportService.registerRequestHandler( JOIN_ACTION_NAME, @@ -136,7 +143,7 @@ public class JoinHelper { false, StartJoinRequest::new, (request, channel, task) -> { - final DiscoveryNode destination = request.getSourceNode(); + final DiscoveryNode destination = request.getMasterCandidateNode(); sendJoinRequest(destination, currentTermSupplier.getAsLong(), Optional.of(joinLeaderInTerm.apply(request))); channel.sendResponse(Empty.INSTANCE); } @@ -368,8 +375,8 @@ public void onFailure(Exception e) { } void sendStartJoinRequest(final StartJoinRequest startJoinRequest, final DiscoveryNode destination) { - assert startJoinRequest.getSourceNode().isMasterNode() - : "sending start-join request for master-ineligible " + startJoinRequest.getSourceNode(); + assert startJoinRequest.getMasterCandidateNode().isMasterNode() + : "sending start-join request for master-ineligible " + startJoinRequest.getMasterCandidateNode(); transportService.sendRequest(destination, START_JOIN_ACTION_NAME, startJoinRequest, new TransportResponseHandler.Empty() { @Override public Executor executor(ThreadPool threadPool) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java index 2ba65873738a0..a6a2f454694ae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java @@ -60,7 +60,7 @@ public JoinRequest( long minimumTerm, Optional optionalJoin ) { - assert optionalJoin.isPresent() == false || optionalJoin.get().getSourceNode().equals(sourceNode); + assert optionalJoin.isPresent() == false || 
optionalJoin.get().votingNode().equals(sourceNode); this.sourceNode = sourceNode; this.compatibilityVersions = compatibilityVersions; this.features = features; @@ -76,7 +76,10 @@ public JoinRequest(StreamInput in) throws IOException { } else { // there's a 1-1 mapping from Version to TransportVersion before 8.8.0 // no known mapping versions here - compatibilityVersions = new CompatibilityVersions(TransportVersion.fromId(sourceNode.getVersion().id), Map.of()); + compatibilityVersions = new CompatibilityVersions( + TransportVersion.fromId(sourceNode.getPre811VersionId().getAsInt()), + Map.of() + ); } if (in.getTransportVersion().onOrAfter(TransportVersions.CLUSTER_FEATURES_ADDED)) { features = in.readCollectionAsSet(StreamInput::readString); @@ -121,7 +124,7 @@ public long getTerm() { // If the join is also present then its term will normally equal the corresponding term, but we do not require callers to // obtain the term and the join in a synchronized fashion so it's possible that they disagree. Also older nodes do not share the // minimum term, so for BWC we can take it from the join if present. - return Math.max(minimumTerm, optionalJoin.map(Join::getTerm).orElse(0L)); + return Math.max(minimumTerm, optionalJoin.map(Join::term).orElse(0L)); } public Optional getOptionalJoin() { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index d9911ad12df84..6ba35d6aec25a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -12,7 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.ClusterState; @@ -30,6 +30,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.Environment; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; @@ -148,10 +149,23 @@ public JoinValidationService( } public void validateJoin(DiscoveryNode discoveryNode, ActionListener listener) { - if (discoveryNode.getVersion().onOrAfter(Version.V_8_3_0)) { + // This node isn't in the cluster yet so ClusterState#getMinTransportVersion() doesn't apply, we must obtain a specific connection + // so we can check its transport version to decide how to proceed. 
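The comment above describes the key change in validateJoin: the decision is gated on the transport version negotiated on the specific connection to the joining node, not on the node's release version, because that node is not yet represented in the cluster state. A self-contained sketch of that dispatch follows; the types and the version constant are illustrative stand-ins, not the real TransportVersions ids.

```java
// Sketch of per-connection transport-version dispatch, with placeholder types.
public class VersionGateSketch {

    record Connection(int transportVersion) {}

    static final int V_8_3_0 = 8_030_099; // illustrative id, standing in for the TransportVersions constant

    static String validationPath(Connection connection) {
        // Gate on the handshaken transport version of this connection: the joining
        // node is not in the cluster state yet, so cluster-wide minimums don't apply.
        return connection.transportVersion() >= V_8_3_0
            ? "chunked join validation via JoinValidation"
            : "legacy full-state join validation";
    }

    public static void main(String[] args) {
        System.out.println(validationPath(new Connection(8_500_061))); // newer peer
        System.out.println(validationPath(new Connection(8_020_099))); // pre-8.3 peer
    }
}
```

Obtaining the connection up front also lets the same Connection object be handed to the JoinValidation task, so the validation request is guaranteed to go out on the connection whose version was checked.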
+ + final Transport.Connection connection; + try { + connection = transportService.getConnection(discoveryNode); + assert connection != null; + } catch (Exception e) { + assert e instanceof NodeNotConnectedException : e; + listener.onFailure(e); + return; + } + + if (connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { if (executeRefs.tryIncRef()) { try { - execute(new JoinValidation(discoveryNode, listener)); + execute(new JoinValidation(discoveryNode, connection, listener)); } finally { executeRefs.decRef(); } @@ -159,39 +173,44 @@ public void validateJoin(DiscoveryNode discoveryNode, ActionListener liste listener.onFailure(new NodeClosedException(transportService.getLocalNode())); } } else { - final var responseHandler = TransportResponseHandler.empty(responseExecutor, listener.delegateResponse((l, e) -> { - logger.warn(() -> "failed to validate incoming join request from node [" + discoveryNode + "]", e); - listener.onFailure( - new IllegalStateException( - String.format( - Locale.ROOT, - "failure when sending a join validation request from [%s] to [%s]", - transportService.getLocalNode().descriptionWithoutAttributes(), - discoveryNode.descriptionWithoutAttributes() - ), - e - ) - ); - })); - final var clusterState = clusterStateSupplier.get(); - if (clusterState != null) { - assert clusterState.nodes().isLocalNodeElectedMaster(); - transportService.sendRequest( - discoveryNode, - JOIN_VALIDATE_ACTION_NAME, - new ValidateJoinRequest(clusterState), - REQUEST_OPTIONS, - responseHandler - ); - } else { - transportService.sendRequest( - discoveryNode, - JoinHelper.JOIN_PING_ACTION_NAME, - TransportRequest.Empty.INSTANCE, - REQUEST_OPTIONS, - responseHandler - ); - } + legacyValidateJoin(discoveryNode, listener, connection); + } + } + + @UpdateForV9 + private void legacyValidateJoin(DiscoveryNode discoveryNode, ActionListener listener, Transport.Connection connection) { + final var responseHandler = TransportResponseHandler.empty(responseExecutor, listener.delegateResponse((l, e) -> { + logger.warn(() -> "failed to validate incoming join request from node [" + discoveryNode + "]", e); + listener.onFailure( + new IllegalStateException( + String.format( + Locale.ROOT, + "failure when sending a join validation request from [%s] to [%s]", + transportService.getLocalNode().descriptionWithoutAttributes(), + discoveryNode.descriptionWithoutAttributes() + ), + e + ) + ); + })); + final var clusterState = clusterStateSupplier.get(); + if (clusterState != null) { + assert clusterState.nodes().isLocalNodeElectedMaster(); + transportService.sendRequest( + connection, + JOIN_VALIDATE_ACTION_NAME, + new ValidateJoinRequest(clusterState), + REQUEST_OPTIONS, + responseHandler + ); + } else { + transportService.sendRequest( + connection, + JoinHelper.JOIN_PING_ACTION_NAME, + TransportRequest.Empty.INSTANCE, + REQUEST_OPTIONS, + responseHandler + ); } } @@ -312,27 +331,22 @@ public String toString() { private class JoinValidation extends ActionRunnable { private final DiscoveryNode discoveryNode; + private final Transport.Connection connection; - JoinValidation(DiscoveryNode discoveryNode, ActionListener listener) { + JoinValidation(DiscoveryNode discoveryNode, Transport.Connection connection, ActionListener listener) { super(listener); this.discoveryNode = discoveryNode; + this.connection = connection; } @Override - protected void doRun() throws Exception { - assert discoveryNode.getVersion().onOrAfter(Version.V_8_3_0) : discoveryNode.getVersion(); + protected void doRun() { + assert 
connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) : discoveryNode.getVersion(); // NB these things never run concurrently to each other, or to the cache cleaner (see IMPLEMENTATION NOTES above) so it is safe // to do these (non-atomic) things to the (unsynchronized) statesByVersion map. - Transport.Connection connection; - try { - connection = transportService.getConnection(discoveryNode); - } catch (NodeNotConnectedException e) { - listener.onFailure(e); - return; - } - var version = connection.getTransportVersion(); - var cachedBytes = statesByVersion.get(version); - var bytes = maybeSerializeClusterState(cachedBytes, discoveryNode, version); + var transportVersion = connection.getTransportVersion(); + var cachedBytes = statesByVersion.get(transportVersion); + var bytes = maybeSerializeClusterState(cachedBytes, discoveryNode, transportVersion); if (bytes == null) { // Normally if we're not the master then the Coordinator sends a ping message just to validate connectivity instead of // getting here. But if we were the master when the Coordinator checked then we might not be the master any more, so we @@ -349,12 +363,11 @@ protected void doRun() throws Exception { ); return; } - assert bytes.hasReferences() : "already closed"; - bytes.incRef(); + bytes.mustIncRef(); transportService.sendRequest( connection, JOIN_VALIDATE_ACTION_NAME, - new BytesTransportRequest(bytes, version), + new BytesTransportRequest(bytes, transportVersion), REQUEST_OPTIONS, new CleanableResponseHandler<>( listener.map(ignored -> null), diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index 00086c42ed4ae..480f1d5503d61 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -12,6 +12,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterFeatures; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.NotMasterException; @@ -25,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -34,6 +36,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -55,19 +58,22 @@ public class NodeJoinExecutor implements ClusterStateTaskExecutor { private final AllocationService allocationService; private final RerouteService rerouteService; + private final FeatureService featureService; private final Function maybeReconfigureAfterMasterElection; - public NodeJoinExecutor(AllocationService allocationService, RerouteService rerouteService) { - this(allocationService, rerouteService, Function.identity()); + public NodeJoinExecutor(AllocationService allocationService, RerouteService rerouteService, FeatureService featureService) { + this(allocationService, rerouteService, featureService, 
Function.identity()); } public NodeJoinExecutor( AllocationService allocationService, RerouteService rerouteService, + FeatureService featureService, Function maybeReconfigureAfterMasterElection ) { this.allocationService = allocationService; this.rerouteService = rerouteService; + this.featureService = featureService; this.maybeReconfigureAfterMasterElection = maybeReconfigureAfterMasterElection; } @@ -123,6 +129,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); Map compatibilityVersionsMap = new HashMap<>(newState.compatibilityVersions()); Map> nodeFeatures = new HashMap<>(newState.nodeFeatures()); + Set allNodesFeatures = ClusterFeatures.calculateAllNodeFeatures(nodeFeatures.values()); assert nodesBuilder.isLocalNodeElectedMaster(); @@ -155,16 +162,17 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex if (enforceVersionBarrier) { ensureVersionBarrier(node.getVersion(), minClusterNodeVersion); CompatibilityVersions.ensureVersionsCompatibility(compatibilityVersions, compatibilityVersionsMap.values()); - // TODO: enforce feature ratchet barrier } blockForbiddenVersions(compatibilityVersions.transportVersion()); ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); + enforceNodeFeatureBarrier(node.getId(), allNodesFeatures, features); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata()); nodesBuilder.add(node); compatibilityVersionsMap.put(node.getId(), compatibilityVersions); nodeFeatures.put(node.getId(), features); + allNodesFeatures.retainAll(features); nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -444,6 +452,16 @@ public static void ensureVersionBarrier(Version joiningNodeVersion, Version minC } } + private void enforceNodeFeatureBarrier(String nodeId, Set existingNodesFeatures, Set newNodeFeatures) { + // prevent join if it does not have one or more features that all other nodes have + Set missingFeatures = new HashSet<>(existingNodesFeatures); + missingFeatures.removeAll(newNodeFeatures); + + if (missingFeatures.isEmpty() == false) { + throw new IllegalStateException("Node " + nodeId + " is missing required features " + missingFeatures); + } + } + public static Collection> addBuiltInJoinValidators( Collection> onJoinValidators ) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java index 9e7383a4c3f14..6afb85bdf629e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java @@ -366,8 +366,8 @@ public void onResponse(PublishWithJoinResponse response) { if (response.getJoin().isPresent()) { final Join join = response.getJoin().get(); - assert discoveryNode.equals(join.getSourceNode()); - assert join.getTerm() == response.getPublishResponse().getTerm() : response; + assert discoveryNode.equals(join.votingNode()); + assert join.term() == response.getPublishResponse().getTerm() : response; logger.trace("handling join 
within publish response: {}", join); onJoin(join); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java index df26646d154c6..cb492f39f9337 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java @@ -16,34 +16,38 @@ /** * Represents the action of requesting a join vote (see {@link Join}) from a node. - * The source node represents the node that is asking for join votes. + * + * A {@link StartJoinRequest} is broadcast to each node in the cluster, requesting + * that each node join the new cluster formed around the master candidate node in a + * new term. The sender is either the new master candidate or the current master + * abdicating to another eligible node in the cluster. */ public class StartJoinRequest extends TransportRequest { - private final DiscoveryNode sourceNode; + private final DiscoveryNode masterCandidateNode; private final long term; - public StartJoinRequest(DiscoveryNode sourceNode, long term) { - this.sourceNode = sourceNode; + public StartJoinRequest(DiscoveryNode masterCandidateNode, long term) { + this.masterCandidateNode = masterCandidateNode; this.term = term; } public StartJoinRequest(StreamInput input) throws IOException { super(input); - this.sourceNode = new DiscoveryNode(input); + this.masterCandidateNode = new DiscoveryNode(input); this.term = input.readLong(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - sourceNode.writeTo(out); + masterCandidateNode.writeTo(out); out.writeLong(term); } - public DiscoveryNode getSourceNode() { - return sourceNode; + public DiscoveryNode getMasterCandidateNode() { + return masterCandidateNode; } public long getTerm() { @@ -52,7 +56,7 @@ public long getTerm() { @Override public String toString() { - return "StartJoinRequest{" + "term=" + term + ",node=" + sourceNode + "}"; + return "StartJoinRequest{" + "term=" + term + ",node=" + masterCandidateNode + "}"; } @Override @@ -63,12 +67,12 @@ public boolean equals(Object o) { StartJoinRequest that = (StartJoinRequest) o; if (term != that.term) return false; - return sourceNode.equals(that.sourceNode); + return masterCandidateNode.equals(that.masterCandidateNode); } @Override public int hashCode() { - int result = sourceNode.hashCode(); + int result = masterCandidateNode.hashCode(); result = 31 * result + (int) (term ^ (term >>> 32)); return result; } diff --git a/server/src/main/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidator.java b/server/src/main/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidator.java index 51f90b9610805..a2df7c234680a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidator.java +++ b/server/src/main/java/org/elasticsearch/cluster/desirednodes/DesiredNodesSettingsValidator.java @@ -16,14 +16,16 @@ import java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.function.Consumer; import java.util.stream.Collectors; import static java.lang.String.format; -public class DesiredNodesSettingsValidator { +public class DesiredNodesSettingsValidator implements Consumer> { private record DesiredNodeValidationError(int position, @Nullable String externalId, RuntimeException exception) {} - public void validate(List nodes) { + @Override + public 
void accept(List nodes) { final List validationErrors = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { final DesiredNode node = nodes.get(i); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index faa3010adbf72..a0dd7bc3e9eef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -111,59 +111,7 @@ public static Builder builder() { return new Builder(); } - /** - * @deprecated use {@link Builder} instead - */ - @Deprecated(forRemoval = true) - public ComposableIndexTemplate( - List indexPatterns, - @Nullable Template template, - @Nullable List componentTemplates, - @Nullable Long priority, - @Nullable Long version, - @Nullable Map metadata - ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null, null, null); - } - - /** - * @deprecated use {@link Builder} instead - */ - @Deprecated(forRemoval = true) - public ComposableIndexTemplate( - List indexPatterns, - @Nullable Template template, - @Nullable List componentTemplates, - @Nullable Long priority, - @Nullable Long version, - @Nullable Map metadata, - @Nullable DataStreamTemplate dataStreamTemplate - ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null, null); - } - - /** - * @deprecated use {@link Builder} instead - */ - @Deprecated(forRemoval = true) - public ComposableIndexTemplate( - List indexPatterns, - @Nullable Template template, - @Nullable List componentTemplates, - @Nullable Long priority, - @Nullable Long version, - @Nullable Map metadata, - @Nullable DataStreamTemplate dataStreamTemplate, - @Nullable Boolean allowAutoCreate - ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, allowAutoCreate, null, null); - } - - /** - * @deprecated use {@link Builder} instead - */ - @Deprecated(forRemoval = true) - public ComposableIndexTemplate( + private ComposableIndexTemplate( List indexPatterns, @Nullable Template template, @Nullable List componentTemplates, @@ -416,28 +364,42 @@ public static class DataStreamTemplate implements Writeable, ToXContentObject { private static final ParseField HIDDEN = new ParseField("hidden"); private static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); + private static final ParseField FAILURE_STORE = new ParseField("failure_store"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_stream_template", false, - args -> new DataStreamTemplate(args[0] != null && (boolean) args[0], args[1] != null && (boolean) args[1]) + args -> new DataStreamTemplate( + args[0] != null && (boolean) args[0], + args[1] != null && (boolean) args[1], + DataStream.isFailureStoreEnabled() && args[2] != null && (boolean) args[2] + ) ); static { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), HIDDEN); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING); + if (DataStream.isFailureStoreEnabled()) { + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE); + } } private final boolean hidden; private final boolean allowCustomRouting; + private final boolean failureStore; public DataStreamTemplate() { - this(false, false); + 
this(false, false, false); } public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { + this(hidden, allowCustomRouting, false); + } + + public DataStreamTemplate(boolean hidden, boolean allowCustomRouting, boolean failureStore) { this.hidden = hidden; this.allowCustomRouting = allowCustomRouting; + this.failureStore = failureStore; } DataStreamTemplate(StreamInput in) throws IOException { @@ -455,6 +417,11 @@ public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { boolean value = in.readBoolean(); assert value == false : "expected false, because this used to be an optional enum that never got set"; } + if (in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { + failureStore = in.readBoolean(); + } else { + failureStore = false; + } } /** @@ -483,6 +450,10 @@ public boolean isAllowCustomRouting() { return allowCustomRouting; } + public boolean hasFailureStore() { + return failureStore; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hidden); @@ -493,6 +464,9 @@ public void writeTo(StreamOutput out) throws IOException { // See comment in constructor. out.writeBoolean(false); } + if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { + out.writeBoolean(failureStore); + } } @Override @@ -500,6 +474,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("hidden", hidden); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting); + if (DataStream.isFailureStoreEnabled()) { + builder.field(FAILURE_STORE.getPreferredName(), failureStore); + } builder.endObject(); return builder; } @@ -509,12 +486,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DataStreamTemplate that = (DataStreamTemplate) o; - return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting; + return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting && failureStore == that.failureStore; } @Override public int hashCode() { - return Objects.hash(hidden, allowCustomRouting); + return Objects.hash(hidden, allowCustomRouting, failureStore); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index c5cf0b29f6273..34d8515d2dfdd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.PointValues; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; @@ -26,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -66,7 +68,15 @@ public final class DataStream implements SimpleDiffable, ToXContentObject, IndexAbstraction { + public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new 
FeatureFlag("failure_store"); + public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.DATA_STREAM_FAILURE_STORE_ADDED; + + public static boolean isFailureStoreEnabled() { + return FAILURE_STORE_FEATURE_FLAG.isEnabled(); + } + public static final String BACKING_INDEX_PREFIX = ".ds-"; + public static final String FAILURE_STORE_PREFIX = ".fs-"; public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); public static final String TIMESTAMP_FIELD_NAME = "@timestamp"; // Timeseries indices' leaf readers should be sorted by desc order of their timestamp field, as it allows search time optimizations @@ -100,6 +110,8 @@ public final class DataStream implements SimpleDiffable, ToXContentO private final IndexMode indexMode; @Nullable private final DataStreamLifecycle lifecycle; + private final boolean failureStore; + private final List failureIndices; public DataStream( String name, @@ -111,7 +123,9 @@ public DataStream( boolean system, boolean allowCustomRouting, IndexMode indexMode, - DataStreamLifecycle lifecycle + DataStreamLifecycle lifecycle, + boolean failureStore, + List failureIndices ) { this( name, @@ -124,7 +138,9 @@ public DataStream( System::currentTimeMillis, allowCustomRouting, indexMode, - lifecycle + lifecycle, + failureStore, + failureIndices ); } @@ -140,7 +156,9 @@ public DataStream( LongSupplier timeProvider, boolean allowCustomRouting, IndexMode indexMode, - DataStreamLifecycle lifecycle + DataStreamLifecycle lifecycle, + boolean failureStore, + List failureIndices ) { this.name = name; this.indices = List.copyOf(indices); @@ -155,6 +173,8 @@ public DataStream( this.allowCustomRouting = allowCustomRouting; this.indexMode = indexMode; this.lifecycle = lifecycle; + this.failureStore = failureStore; + this.failureIndices = failureIndices; assert assertConsistent(this.indices); } @@ -170,7 +190,7 @@ public DataStream( boolean allowCustomRouting, IndexMode indexMode ) { - this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null); + this(name, indices, generation, metadata, hidden, replicated, system, allowCustomRouting, indexMode, null, false, List.of()); } private static boolean assertConsistent(List indices) { @@ -207,6 +227,10 @@ public long getGeneration() { return generation; } + public List getFailureIndices() { + return failureIndices; + } + @Override public Index getWriteIndex() { return indices.get(indices.size() - 1); @@ -327,6 +351,16 @@ public boolean isAllowCustomRouting() { return allowCustomRouting; } + /** + * Determines if this data stream should persist ingest pipeline and mapping failures from bulk requests to a locally + * configured failure store. + * + * @return Whether this data stream should store ingestion failures. 
+     */
+    public boolean isFailureStore() {
+        return failureStore;
+    }
+
     @Nullable
     public IndexMode getIndexMode() {
         return indexMode;
@@ -369,7 +403,20 @@ public DataStream unsafeRollover(Index writeIndex, long generation, boolean timeSeries) {
         List<Index> backingIndices = new ArrayList<>(indices);
         backingIndices.add(writeIndex);
-        return new DataStream(name, backingIndices, generation, metadata, hidden, false, system, allowCustomRouting, indexMode, lifecycle);
+        return new DataStream(
+            name,
+            backingIndices,
+            generation,
+            metadata,
+            hidden,
+            false,
+            system,
+            allowCustomRouting,
+            indexMode,
+            lifecycle,
+            failureStore,
+            failureIndices
+        );
     }
     /**
@@ -444,7 +491,9 @@ public DataStream removeBackingIndex(Index index) {
             system,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
@@ -487,7 +536,9 @@ public DataStream replaceBackingIndex(Index existingBackingIndex, Index newBackingIndex) {
             system,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
@@ -545,7 +596,9 @@ public DataStream addBackingIndex(Metadata clusterMetadata, Index index) {
             system,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
@@ -561,7 +614,9 @@ public DataStream promoteDataStream() {
             timeProvider,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
@@ -595,7 +650,9 @@ public DataStream snapshot(Collection<String> indicesInSnapshot) {
             system,
             allowCustomRouting,
             indexMode,
-            lifecycle
+            lifecycle,
+            failureStore,
+            failureIndices
         );
     }
@@ -778,9 +835,28 @@ public static String getDefaultBackingIndexName(String dataStreamName, long generation, long epochMillis) {
         );
     }
+    /**
+     * Generates the name of the index that conforms to the default naming convention for failure store indices
+     * on data streams given the specified data stream name, generation, and time.
+     *
+     * @param dataStreamName name of the data stream
+     * @param generation generation of the data stream
+     * @param epochMillis creation time for the failure store index
+     * @return failure store index name
+     */
+    public static String getDefaultFailureStoreName(String dataStreamName, long generation, long epochMillis) {
+        return String.format(
+            Locale.ROOT,
+            FAILURE_STORE_PREFIX + "%s-%s-%06d",
+            dataStreamName,
+            DATE_FORMATTER.formatMillis(epochMillis),
+            generation
+        );
+    }
+
     public DataStream(StreamInput in) throws IOException {
         this(
-            in.readString(),
+            readName(in),
             readIndices(in),
             in.readVLong(),
             in.readMap(),
@@ -789,12 +865,19 @@ public DataStream(StreamInput in) throws IOException {
             in.readBoolean(),
             in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false,
             in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null,
-            in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null
+            in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null,
+            in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false,
+            in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ?
readIndices(in) : List.of() ); } + static String readName(StreamInput in) throws IOException { + String name = in.readString(); + in.readString(); // TODO: clear out the timestamp field, which is a constant https://github.com/elastic/elasticsearch/issues/101991 + return name; + } + static List readIndices(StreamInput in) throws IOException { - in.readString(); // timestamp field, which is always @timestamp return in.readCollectionAsImmutableList(Index::new); } @@ -805,7 +888,7 @@ public static Diff readDiffFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - out.writeString(TIMESTAMP_FIELD_NAME); + out.writeString(TIMESTAMP_FIELD_NAME); // TODO: clear this out in the future https://github.com/elastic/elasticsearch/issues/101991 out.writeCollection(indices); out.writeVLong(generation); out.writeGenericMap(metadata); @@ -821,6 +904,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(lifecycle); } + if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { + out.writeBoolean(failureStore); + out.writeCollection(failureIndices); + } } public static final ParseField NAME_FIELD = new ParseField("name"); @@ -834,6 +921,8 @@ public void writeTo(StreamOutput out) throws IOException { public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); public static final ParseField INDEX_MODE = new ParseField("index_mode"); public static final ParseField LIFECYCLE = new ParseField("lifecycle"); + public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store"); + public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -848,7 +937,9 @@ public void writeTo(StreamOutput out) throws IOException { args[6] != null && (boolean) args[6], args[7] != null && (boolean) args[7], args[8] != null ? IndexMode.fromString((String) args[8]) : null, - (DataStreamLifecycle) args[9] + (DataStreamLifecycle) args[9], + DataStream.isFailureStoreEnabled() && args[10] != null && (boolean) args[10], + DataStream.isFailureStoreEnabled() && args[11] != null ? 
(List) args[11] : List.of() ) ); @@ -871,6 +962,14 @@ public void writeTo(StreamOutput out) throws IOException { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), INDEX_MODE); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> DataStreamLifecycle.fromXContent(p), LIFECYCLE); + if (DataStream.isFailureStoreEnabled()) { + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD); + PARSER.declareObjectArray( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> Index.fromXContent(p), + FAILURE_INDICES_FIELD + ); + } } public static DataStream fromXContent(XContentParser parser) throws IOException { @@ -895,6 +994,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla .endObject(); builder.xContentList(INDICES_FIELD.getPreferredName(), indices); builder.field(GENERATION_FIELD.getPreferredName(), generation); + if (DataStream.isFailureStoreEnabled() && failureIndices.isEmpty() == false) { + builder.xContentList(FAILURE_INDICES_FIELD.getPreferredName(), failureIndices); + } if (metadata != null) { builder.field(METADATA_FIELD.getPreferredName(), metadata); } @@ -902,6 +1004,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.field(REPLICATED_FIELD.getPreferredName(), replicated); builder.field(SYSTEM_FIELD.getPreferredName(), system); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting); + if (DataStream.isFailureStoreEnabled()) { + builder.field(FAILURE_STORE_FIELD.getPreferredName(), failureStore); + } if (indexMode != null) { builder.field(INDEX_MODE.getPreferredName(), indexMode); } @@ -927,12 +1032,27 @@ public boolean equals(Object o) { && replicated == that.replicated && allowCustomRouting == that.allowCustomRouting && indexMode == that.indexMode - && Objects.equals(lifecycle, that.lifecycle); + && Objects.equals(lifecycle, that.lifecycle) + && failureStore == that.failureStore + && failureIndices.equals(that.failureIndices); } @Override public int hashCode() { - return Objects.hash(name, indices, generation, metadata, hidden, system, replicated, allowCustomRouting, indexMode, lifecycle); + return Objects.hash( + name, + indices, + generation, + metadata, + hidden, + system, + replicated, + allowCustomRouting, + indexMode, + lifecycle, + failureStore, + failureIndices + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index a3a6e34d445d2..8d7ce0525e943 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -49,6 +50,19 @@ public class DataStreamLifecycle implements SimpleDiffable, // Versions over the wire public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_057; + public static final String 
DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; + + /** + * Check if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set to {@code true}, indicating that + * we're running in a cluster configuration that is only expecting to use data streams lifecycles. + * + * @param settings the node settings + * @return true if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set + */ + public static boolean isDataStreamsLifecycleOnlyMode(final Settings settings) { + return settings.getAsBoolean(DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME, false); + } + public static final Setting CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING = new Setting<>( "cluster.lifecycle.default.rollover", "max_age=auto,max_primary_shard_size=50gb,min_docs=1,max_primary_shard_docs=200000000", @@ -57,6 +71,8 @@ public class DataStreamLifecycle implements SimpleDiffable, Setting.Property.NodeScope ); + public static final DataStreamLifecycle DEFAULT = new DataStreamLifecycle(); + public static final String DATA_STREAM_LIFECYCLE_ORIGIN = "data_stream_lifecycle"; public static final ParseField ENABLED_FIELD = new ParseField("enabled"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index b50b1e0a74d93..0446b479b191d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.automaton.Automaton; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -63,7 +62,6 @@ public class IndexNameExpressionResolver { private static final Predicate ALWAYS_TRUE = s -> true; public static final String EXCLUDED_DATA_STREAMS_KEY = "es.excluded_ds"; - public static final Version SYSTEM_INDEX_ENFORCEMENT_VERSION = Version.V_8_0_0; public static final IndexVersion SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION = IndexVersions.V_8_0_0; private final ThreadContext threadContext; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 52b522ec5ddaa..d500a8b8e6876 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -46,6 +46,8 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; + public class MetadataCreateDataStreamService { private static final Logger logger = LogManager.getLogger(MetadataCreateDataStreamService.class); @@ -53,6 +55,7 @@ public class MetadataCreateDataStreamService { private final ThreadPool threadPool; private final ClusterService clusterService; private final MetadataCreateIndexService metadataCreateIndexService; + private final boolean isDslOnlyMode; public MetadataCreateDataStreamService( ThreadPool threadPool, @@ -62,17 +65,23 @@ public MetadataCreateDataStreamService( this.threadPool = threadPool; this.clusterService = 
clusterService; this.metadataCreateIndexService = metadataCreateIndexService; + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, ActionListener finalListener) { AtomicReference firstBackingIndexRef = new AtomicReference<>(); + AtomicReference firstFailureStoreRef = new AtomicReference<>(); ActionListener listener = finalListener.delegateFailureAndWrap((l, response) -> { if (response.isAcknowledged()) { String firstBackingIndexName = firstBackingIndexRef.get(); assert firstBackingIndexName != null; + String firstFailureStoreName = firstFailureStoreRef.get(); + var waitForIndices = firstFailureStoreName == null + ? new String[] { firstBackingIndexName } + : new String[] { firstBackingIndexName, firstFailureStoreName }; ActiveShardsObserver.waitForActiveShards( clusterService, - new String[] { firstBackingIndexName }, + waitForIndices, ActiveShardCount.DEFAULT, request.masterNodeTimeout(), l.map(shardsAcked -> AcknowledgedResponse.TRUE) @@ -87,8 +96,18 @@ public void createDataStream(CreateDataStreamClusterStateUpdateRequest request, new AckedClusterStateUpdateTask(Priority.HIGH, request, delegate.clusterStateUpdate()) { @Override public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState clusterState = createDataStream(metadataCreateIndexService, currentState, request, delegate.reroute()); - firstBackingIndexRef.set(clusterState.metadata().dataStreams().get(request.name).getIndices().get(0).getName()); + ClusterState clusterState = createDataStream( + metadataCreateIndexService, + currentState, + isDslOnlyMode, + request, + delegate.reroute() + ); + DataStream createdDataStream = clusterState.metadata().dataStreams().get(request.name); + firstBackingIndexRef.set(createdDataStream.getIndices().get(0).getName()); + if (createdDataStream.getFailureIndices().isEmpty() == false) { + firstFailureStoreRef.set(createdDataStream.getFailureIndices().get(0).getName()); + } return clusterState; } } @@ -103,9 +122,9 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String public ClusterState createDataStream( CreateDataStreamClusterStateUpdateRequest request, ClusterState current, - ActionListener listener + ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, current, request, listener); + return createDataStream(metadataCreateIndexService, current, isDslOnlyMode, request, rerouteListener); } public static final class CreateDataStreamClusterStateUpdateRequest extends ClusterStateUpdateRequest< @@ -157,15 +176,20 @@ public boolean performReroute() { public SystemDataStreamDescriptor getSystemDataStreamDescriptor() { return descriptor; } + + long getStartTime() { + return startTime; + } } static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, ClusterState currentState, + boolean isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, - ActionListener listener + ActionListener rerouteListener ) throws Exception { - return createDataStream(metadataCreateIndexService, currentState, request, List.of(), null, listener); + return createDataStream(metadataCreateIndexService, currentState, isDslOnlyMode, request, List.of(), null, rerouteListener); } /** @@ -181,10 +205,11 @@ static ClusterState createDataStream( static ClusterState createDataStream( MetadataCreateIndexService metadataCreateIndexService, ClusterState currentState, + boolean 
isDslOnlyMode, CreateDataStreamClusterStateUpdateRequest request, List backingIndices, IndexMetadata writeIndex, - ActionListener listener + ActionListener rerouteListener ) throws Exception { String dataStreamName = request.name; SystemDataStreamDescriptor systemDataStreamDescriptor = request.getSystemDataStreamDescriptor(); @@ -213,6 +238,11 @@ static ClusterState createDataStream( "data_stream [" + dataStreamName + "] must not start with '" + DataStream.BACKING_INDEX_PREFIX + "'" ); } + if (dataStreamName.startsWith(DataStream.FAILURE_STORE_PREFIX)) { + throw new IllegalArgumentException( + "data_stream [" + dataStreamName + "] must not start with '" + DataStream.FAILURE_STORE_PREFIX + "'" + ); + } final var metadata = currentState.metadata(); final boolean isSystem = systemDataStreamDescriptor != null; @@ -220,43 +250,47 @@ static ClusterState createDataStream( ? systemDataStreamDescriptor.getComposableIndexTemplate() : lookupTemplateForDataStream(dataStreamName, currentState.metadata()); - if (writeIndex == null) { - String firstBackingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, request.startTime); - CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( - "initialize_data_stream", - firstBackingIndexName, - firstBackingIndexName - ).dataStreamName(dataStreamName) - .systemDataStreamDescriptor(systemDataStreamDescriptor) - .nameResolvedInstant(request.startTime) - .performReroute(request.performReroute()) - .setMatchingTemplate(template); - + // If we need to create a failure store, do so first. Do not reroute during the creation since we will do + // that as part of creating the backing index if required. + IndexMetadata failureStoreIndex = null; + if (template.getDataStreamTemplate().hasFailureStore()) { if (isSystem) { - createIndexRequest.settings(SystemIndexDescriptor.DEFAULT_SETTINGS); - } else { - createIndexRequest.settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + throw new IllegalArgumentException("Failure stores are not supported on system data streams"); } + String failureStoreIndexName = DataStream.getDefaultFailureStoreName(dataStreamName, 1, request.getStartTime()); + currentState = createFailureStoreIndex( + metadataCreateIndexService, + currentState, + request, + dataStreamName, + template, + failureStoreIndexName + ); + failureStoreIndex = currentState.metadata().index(failureStoreIndexName); + } - try { - currentState = metadataCreateIndexService.applyCreateIndexRequest(currentState, createIndexRequest, false, listener); - } catch (ResourceAlreadyExistsException e) { - // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during - // auto index/data stream creation. 
- // (otherwise bulk execution fails later, because data stream will also not have been created) - throw new ElasticsearchStatusException( - "data stream could not be created because backing index [{}] already exists", - RestStatus.BAD_REQUEST, - e, - firstBackingIndexName - ); - } + if (writeIndex == null) { + String firstBackingIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1, request.getStartTime()); + currentState = createBackingIndex( + metadataCreateIndexService, + currentState, + request, + rerouteListener, + dataStreamName, + systemDataStreamDescriptor, + isSystem, + template, + firstBackingIndexName + ); writeIndex = currentState.metadata().index(firstBackingIndexName); } else { - listener.onResponse(null); + rerouteListener.onResponse(null); } assert writeIndex != null; assert writeIndex.mapping() != null : "no mapping found for backing index [" + writeIndex.getIndex().getName() + "]"; + assert template.getDataStreamTemplate().hasFailureStore() == false || failureStoreIndex != null; + assert failureStoreIndex == null || failureStoreIndex.mapping() != null + : "no mapping found for failure store [" + failureStoreIndex.getIndex().getName() + "]"; List dsBackingIndices = backingIndices.stream() .map(IndexMetadata::getIndex) @@ -267,6 +301,7 @@ static ClusterState createDataStream( final DataStreamLifecycle lifecycle = isSystem ? MetadataIndexTemplateService.resolveLifecycle(template, systemDataStreamDescriptor.getComponentTemplates()) : MetadataIndexTemplateService.resolveLifecycle(template, metadata.componentTemplates()); + List failureIndices = failureStoreIndex == null ? List.of() : List.of(failureStoreIndex.getIndex()); DataStream newDataStream = new DataStream( dataStreamName, dsBackingIndices, @@ -277,7 +312,9 @@ static ClusterState createDataStream( isSystem, template.getDataStreamTemplate().isAllowCustomRouting(), indexMode, - lifecycle + lifecycle == null && isDslOnlyMode ? 
DataStreamLifecycle.DEFAULT : lifecycle, + template.getDataStreamTemplate().hasFailureStore(), + failureIndices ); Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); @@ -301,6 +338,92 @@ static ClusterState createDataStream( return ClusterState.builder(currentState).metadata(builder).build(); } + private static ClusterState createBackingIndex( + MetadataCreateIndexService metadataCreateIndexService, + ClusterState currentState, + CreateDataStreamClusterStateUpdateRequest request, + ActionListener rerouteListener, + String dataStreamName, + SystemDataStreamDescriptor systemDataStreamDescriptor, + boolean isSystem, + ComposableIndexTemplate template, + String firstBackingIndexName + ) throws Exception { + CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( + "initialize_data_stream", + firstBackingIndexName, + firstBackingIndexName + ).dataStreamName(dataStreamName) + .systemDataStreamDescriptor(systemDataStreamDescriptor) + .nameResolvedInstant(request.getStartTime()) + .performReroute(request.performReroute()) + .setMatchingTemplate(template); + + if (isSystem) { + createIndexRequest.settings(SystemIndexDescriptor.DEFAULT_SETTINGS); + } else { + createIndexRequest.settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + } + + try { + currentState = metadataCreateIndexService.applyCreateIndexRequest(currentState, createIndexRequest, false, rerouteListener); + } catch (ResourceAlreadyExistsException e) { + // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during + // auto index/data stream creation. + // (otherwise bulk execution fails later, because data stream will also not have been created) + throw new ElasticsearchStatusException( + "data stream could not be created because backing index [{}] already exists", + RestStatus.BAD_REQUEST, + e, + firstBackingIndexName + ); + } + return currentState; + } + + private static ClusterState createFailureStoreIndex( + MetadataCreateIndexService metadataCreateIndexService, + ClusterState currentState, + CreateDataStreamClusterStateUpdateRequest request, + String dataStreamName, + ComposableIndexTemplate template, + String failureStoreIndexName + ) throws Exception { + if (DataStream.isFailureStoreEnabled() == false) { + return currentState; + } + + CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest( + "initialize_data_stream", + failureStoreIndexName, + failureStoreIndexName + ).dataStreamName(dataStreamName) + .nameResolvedInstant(request.getStartTime()) + .performReroute(false) + .setMatchingTemplate(template) + .settings(MetadataRolloverService.HIDDEN_INDEX_SETTINGS); + + try { + currentState = metadataCreateIndexService.applyCreateIndexRequest( + currentState, + createIndexRequest, + false, + AllocationActionListener.rerouteCompletionIsNotRequired() + ); + } catch (ResourceAlreadyExistsException e) { + // Rethrow as ElasticsearchStatusException, so that bulk transport action doesn't ignore it during + // auto index/data stream creation. 
+ // (otherwise bulk execution fails later, because data stream will also not have been created) + throw new ElasticsearchStatusException( + "data stream could not be created because failure store index [{}] already exists", + RestStatus.BAD_REQUEST, + e, + failureStoreIndexName + ); + } + return currentState; + } + public static ComposableIndexTemplate lookupTemplateForDataStream(String dataStreamName, Metadata metadata) { final String v2Template = MetadataIndexTemplateService.findV2Template(metadata, dataStreamName, false); if (v2Template == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index 8423a5ad37334..2ebcad22185fd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -166,7 +166,9 @@ static ClusterState updateDataLifecycle( dataStream.isSystem(), dataStream.isAllowCustomRouting(), dataStream.getIndexMode(), - lifecycle + lifecycle, + dataStream.isFailureStore(), + dataStream.getFailureIndices() ) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 0c78d497d1194..1e2e15a6300c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -92,6 +92,8 @@ public class MetadataIndexTemplateService { private static final CompressedXContent DEFAULT_TIMESTAMP_MAPPING_WITH_ROUTING; + private static final CompressedXContent DATA_STREAM_FAILURE_STORE_MAPPING; + static { final Map> defaultTimestampField = Map.of( DEFAULT_TIMESTAMP_FIELD, @@ -120,6 +122,110 @@ public class MetadataIndexTemplateService { .map(defaultTimestampField) .endObject() ); + /* + * The data stream failure store mapping. 
The JSON content is as follows: + * { + * "_doc": { + * "dynamic": false, + * "_routing": { + * "required": false + * }, + * "properties": { + * "@timestamp": { + * "type": "date", + * "ignore_malformed": false + * }, + * "document": { + * "properties": { + * "id": { + * "type": "keyword" + * }, + * "routing": { + * "type": "keyword" + * }, + * "index": { + * "type": "keyword" + * } + * } + * }, + * "error": { + * "properties": { + * "message": { + * "type": "wildcard" + * }, + * "stack_trace": { + * "type": "text" + * }, + * "type": { + * "type": "keyword" + * }, + * "pipeline": { + * "type": "keyword" + * }, + * "pipeline_trace": { + * "type": "keyword" + * }, + * "processor": { + * "type": "keyword" + * } + * } + * } + * } + * } + * } + */ + DATA_STREAM_FAILURE_STORE_MAPPING = new CompressedXContent( + (builder, params) -> builder.startObject(MapperService.SINGLE_MAPPING_NAME) + .field("dynamic", false) + .startObject(RoutingFieldMapper.NAME) + .field("required", false) + .endObject() + .startObject("properties") + .startObject(DEFAULT_TIMESTAMP_FIELD) + .field("type", DateFieldMapper.CONTENT_TYPE) + .field("ignore_malformed", false) + .endObject() + .startObject("document") + .startObject("properties") + // document.source is unmapped so that it can be persisted in source only without worrying that the document might cause + // a mapping error + .startObject("id") + .field("type", "keyword") + .endObject() + .startObject("routing") + .field("type", "keyword") + .endObject() + .startObject("index") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .startObject("error") + .startObject("properties") + .startObject("message") + .field("type", "wildcard") + .endObject() + .startObject("stack_trace") + .field("type", "text") + .endObject() + .startObject("type") + .field("type", "keyword") + .endObject() + .startObject("pipeline") + .field("type", "keyword") + .endObject() + .startObject("pipeline_trace") + .field("type", "keyword") + .endObject() + .startObject("processor") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ); + } catch (IOException e) { throw new AssertionError(e); } @@ -1338,6 +1444,10 @@ public static List collectMappings( final String indexName ) { Objects.requireNonNull(template, "Composable index template must be provided"); + // Check if this is a failure store index, and if it is, discard any template mappings. Failure store mappings are predefined. 
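+        // For illustration only, a document accepted by the failure store mapping defined above could look like
+        // this (field values are made up; only the field names come from DATA_STREAM_FAILURE_STORE_MAPPING):
+        //   {
+        //     "@timestamp": "2023-11-21T14:05:00.000Z",
+        //     "document": { "id": "some-doc-id", "routing": "some-routing-value", "index": "logs-app-default" },
+        //     "error": {
+        //       "type": "mapper_parsing_exception",
+        //       "message": "failed to parse field [code] of type [long]",
+        //       "stack_trace": "org.elasticsearch.index.mapper.MapperParsingException: ...",
+        //       "pipeline": "logs-app-pipeline",
+        //       "pipeline_trace": "logs-app-pipeline",
+        //       "processor": "set"
+        //     }
+        //   }
+        // Since the mapping sets dynamic: false, any other fields (such as the captured document source) are kept
+        // in _source without being indexed, so they cannot cause further mapping failures.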
+ if (template.getDataStreamTemplate() != null && indexName.startsWith(DataStream.FAILURE_STORE_PREFIX)) { + return List.of(DATA_STREAM_FAILURE_STORE_MAPPING, ComposableIndexTemplate.DataStreamTemplate.DATA_STREAM_MAPPING_SNIPPET); + } List mappings = template.composedOf() .stream() .map(componentTemplates::get) @@ -1348,7 +1458,7 @@ public static List collectMappings( .collect(Collectors.toCollection(LinkedList::new)); // Add the actual index template's mappings, since it takes the highest precedence Optional.ofNullable(template.template()).map(Template::mappings).ifPresent(mappings::add); - if (template.getDataStreamTemplate() != null && indexName.startsWith(DataStream.BACKING_INDEX_PREFIX)) { + if (template.getDataStreamTemplate() != null && isDataStreamIndex(indexName)) { // add a default mapping for the `@timestamp` field, at the lowest precedence, to make bootstrapping data streams more // straightforward as all backing indices are required to have a timestamp field if (template.getDataStreamTemplate().isAllowCustomRouting()) { @@ -1359,7 +1469,7 @@ public static List collectMappings( } // Only include _timestamp mapping snippet if creating backing index. - if (indexName.startsWith(DataStream.BACKING_INDEX_PREFIX)) { + if (isDataStreamIndex(indexName)) { // Only if template has data stream definition this should be added and // adding this template last, since _timestamp field should have highest precedence: if (template.getDataStreamTemplate() != null) { @@ -1369,6 +1479,10 @@ public static List collectMappings( return Collections.unmodifiableList(mappings); } + private static boolean isDataStreamIndex(String indexName) { + return indexName.startsWith(DataStream.BACKING_INDEX_PREFIX) || indexName.startsWith(DataStream.FAILURE_STORE_PREFIX); + } + /** * Resolve index settings for the given list of v1 templates, templates are apply in reverse * order since they should be provided in order of priority/order diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index b268be27e17ac..f7fa34d76498a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.isDataStreamsLifecycleOnlyMode; import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.createDataStream; public class MetadataMigrateToDataStreamService { @@ -63,6 +64,7 @@ public class MetadataMigrateToDataStreamService { private final IndicesService indexServices; private final ThreadContext threadContext; private final MetadataCreateIndexService metadataCreateIndexService; + private final boolean isDslOnlyMode; public MetadataMigrateToDataStreamService( ThreadPool threadPool, @@ -74,6 +76,7 @@ public MetadataMigrateToDataStreamService( this.indexServices = indexServices; this.threadContext = threadPool.getThreadContext(); this.metadataCreateIndexService = metadataCreateIndexService; + this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); } public void migrateToDataStream( @@ -104,7 +107,7 @@ public void migrateToDataStream( @Override public ClusterState execute(ClusterState currentState) throws Exception { - 
ClusterState clusterState = migrateToDataStream(currentState, indexMetadata -> { + ClusterState clusterState = migrateToDataStream(currentState, isDslOnlyMode, indexMetadata -> { try { return indexServices.createIndexMapperServiceForValidation(indexMetadata); } catch (IOException e) { @@ -125,6 +128,7 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String static ClusterState migrateToDataStream( ClusterState currentState, + boolean isDslOnlyMode, Function mapperSupplier, MigrateToDataStreamClusterStateUpdateRequest request, MetadataCreateIndexService metadataCreateIndexService, @@ -155,6 +159,7 @@ static ClusterState migrateToDataStream( return createDataStream( metadataCreateIndexService, currentState, + isDslOnlyMode, req, backingIndices, currentState.metadata().index(writeIndex), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 1310c0f7ec5c9..5891b953acfca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -20,7 +20,11 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionMultiListener; import org.elasticsearch.cluster.service.ClusterService; @@ -40,7 +44,9 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashSet; +import java.util.List; import java.util.Locale; +import java.util.Objects; import java.util.Set; import java.util.function.BiFunction; @@ -192,9 +198,57 @@ ClusterState execute(ClusterState currentState) { } if (skippedSettings.isEmpty() == false && openIndices.isEmpty() == false) { - throw new IllegalArgumentException( - String.format(Locale.ROOT, "Can't update non dynamic settings [%s] for open indices %s", skippedSettings, openIndices) - ); + if (request.reopenShards()) { + // We have non-dynamic settings and open indices. We will unassign all of the shards in these indices so that the new + // changed settings are applied when the shards are re-assigned. 
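+                    // Sketch of the intended usage (index name illustrative; the `reopen` query parameter is what
+                    // sets request.reopenShards()): updating a static setting such as index.codec on an open index,
+                    //   PUT /my-index/_settings?reopen=true
+                    //   { "index": { "codec": "best_compression" } }
+                    // previously failed outright; with reopen=true the affected shards are cycled through the
+                    // unassigned state so the new value takes effect on reassignment.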
+ routingTableBuilder = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + currentState.routingTable() + ); + for (Index index : openIndices) { + // We only want to take on the expense of reopening all shards for an index if the setting is really changing + Settings existingSettings = currentState.getMetadata().index(index).getSettings(); + boolean needToReopenIndex = false; + for (String setting : skippedSettings) { + String newValue = request.settings().get(setting); + if (Objects.equals(newValue, existingSettings.get(setting)) == false) { + needToReopenIndex = true; + break; + } + } + if (needToReopenIndex) { + List shardRoutingList = currentState.routingTable().allShards(index.getName()); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + for (ShardRouting shardRouting : shardRoutingList) { + if (ShardRoutingState.UNASSIGNED.equals(shardRouting.state()) == false) { + indexRoutingTableBuilder.addShard( + shardRouting.moveToUnassigned( + new UnassignedInfo( + UnassignedInfo.Reason.INDEX_REOPENED, + "Unassigning shards to update static settings" + ) + ) + ); + } else { + indexRoutingTableBuilder.addShard(shardRouting); + } + } + routingTableBuilder.add(indexRoutingTableBuilder.build()); + openIndices.remove(index); + closedIndices.add(index); + } + } + } else { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Can't update non dynamic settings [%s] for open indices %s unless the `reopen` query parameter is set to " + + "true. Alternatively, close the indices, apply the settings changes, and reopen the indices", + skippedSettings, + openIndices + ) + ); + } } if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) { @@ -209,10 +263,12 @@ ClusterState execute(ClusterState currentState) { * * TODO: should we update the in-sync allocation IDs once the data is deleted by the node? 
*/ - routingTableBuilder = RoutingTable.builder( - allocationService.getShardRoutingRoleStrategy(), - currentState.routingTable() - ); + if (routingTableBuilder == null) { + routingTableBuilder = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + currentState.routingTable() + ); + } routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java index ecc26d15d001f..15fab193dad57 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java @@ -36,27 +36,72 @@ public class ShutdownShardMigrationStatus implements Writeable, ChunkedToXConten public static final String NODE_ALLOCATION_DECISION_KEY = "node_allocation_decision"; private final SingleNodeShutdownMetadata.Status status; + private final long startedShards; + private final long relocatingShards; + private final long initializingShards; private final long shardsRemaining; @Nullable private final String explanation; @Nullable private final ShardAllocationDecision allocationDecision; - public ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status status, long shardsRemaining) { - this(status, shardsRemaining, null, null); + public ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status status, + long shardsRemaining, + @Nullable String explanation, + @Nullable ShardAllocationDecision allocationDecision + ) { + this(status, -1, -1, -1, shardsRemaining, explanation, allocationDecision); } - public ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status status, long shardsRemaining, @Nullable String explanation) { - this(status, shardsRemaining, explanation, null); + public ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards + ) { + this( + status, + startedShards, + relocatingShards, + initializingShards, + startedShards + relocatingShards + initializingShards, + null, + null + ); } public ShutdownShardMigrationStatus( SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards, + @Nullable String explanation + ) { + this( + status, + startedShards, + relocatingShards, + initializingShards, + startedShards + relocatingShards + initializingShards, + explanation, + null + ); + } + + private ShutdownShardMigrationStatus( + SingleNodeShutdownMetadata.Status status, + long startedShards, + long relocatingShards, + long initializingShards, long shardsRemaining, @Nullable String explanation, @Nullable ShardAllocationDecision allocationDecision ) { this.status = Objects.requireNonNull(status, "status must not be null"); + this.startedShards = startedShards; + this.relocatingShards = relocatingShards; + this.initializingShards = initializingShards; this.shardsRemaining = shardsRemaining; this.explanation = explanation; this.allocationDecision = allocationDecision; @@ -64,7 +109,17 @@ public ShutdownShardMigrationStatus( public ShutdownShardMigrationStatus(StreamInput in) throws IOException { this.status =
in.readEnum(SingleNodeShutdownMetadata.Status.class); - this.shardsRemaining = in.readLong(); + if (in.getTransportVersion().onOrAfter(TransportVersions.SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS)) { + this.startedShards = in.readZLong(); + this.relocatingShards = in.readZLong(); + this.initializingShards = in.readZLong(); + this.shardsRemaining = in.readZLong(); + } else { + this.startedShards = -1; + this.relocatingShards = -1; + this.initializingShards = -1; + this.shardsRemaining = in.readLong(); + } this.explanation = in.readOptionalString(); if (in.getTransportVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { this.allocationDecision = in.readOptionalWriteable(ShardAllocationDecision::new); @@ -99,6 +154,11 @@ public Iterator toXContentChunked(ToXContent.Params params private XContentBuilder buildHeader(XContentBuilder builder) throws IOException { builder.field("status", status); + if (startedShards != -1) { + builder.field("started_shards", startedShards); + builder.field("relocating_shards", relocatingShards); + builder.field("initializing_shards", initializingShards); + } builder.field("shard_migrations_remaining", shardsRemaining); if (Objects.nonNull(explanation)) { builder.field("explanation", explanation); @@ -109,7 +169,14 @@ private XContentBuilder buildHeader(XContentBuilder builder) throws IOException @Override public void writeTo(StreamOutput out) throws IOException { out.writeEnum(status); - out.writeLong(shardsRemaining); + if (out.getTransportVersion().onOrAfter(TransportVersions.SHUTDOWN_MIGRATION_STATUS_INCLUDE_COUNTS)) { + out.writeZLong(startedShards); + out.writeZLong(relocatingShards); + out.writeZLong(initializingShards); + out.writeZLong(shardsRemaining); + } else { + out.writeLong(shardsRemaining); + } out.writeOptionalString(explanation); if (out.getTransportVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { out.writeOptionalWriteable(allocationDecision); @@ -119,9 +186,12 @@ public void writeTo(StreamOutput out) throws IOException { @Override public boolean equals(Object o) { if (this == o) return true; - if ((o instanceof ShutdownShardMigrationStatus) == false) return false; + if (o == null || getClass() != o.getClass()) return false; ShutdownShardMigrationStatus that = (ShutdownShardMigrationStatus) o; - return shardsRemaining == that.shardsRemaining + return startedShards == that.startedShards + && relocatingShards == that.relocatingShards + && initializingShards == that.initializingShards + && shardsRemaining == that.shardsRemaining && status == that.status && Objects.equals(explanation, that.explanation) && Objects.equals(allocationDecision, that.allocationDecision); @@ -129,7 +199,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(status, shardsRemaining, explanation, allocationDecision); + return Objects.hash(status, startedShards, relocatingShards, initializingShards, shardsRemaining, explanation, allocationDecision); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java b/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java new file mode 100644 index 0000000000000..05c0876669732 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.snapshots.SnapshotShardSizeInfo; + +import java.util.Set; + +public class ExpectedShardSizeEstimator { + + public static long getExpectedShardSize(ShardRouting shardRouting, long defaultSize, RoutingAllocation allocation) { + return getExpectedShardSize( + shardRouting, + defaultSize, + allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), + allocation.metadata(), + allocation.routingTable() + ); + } + + /** + * Returns the expected shard size for the given shard, or the default value provided if not enough information is available + * to estimate the shard's size. + */ + public static long getExpectedShardSize( + ShardRouting shard, + long defaultValue, + ClusterInfo clusterInfo, + SnapshotShardSizeInfo snapshotShardSizeInfo, + Metadata metadata, + RoutingTable routingTable + ) { + final IndexMetadata indexMetadata = metadata.getIndexSafe(shard.index()); + if (indexMetadata.getResizeSourceIndex() != null + && shard.active() == false + && shard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { + return getExpectedSizeOfResizedShard(shard, defaultValue, indexMetadata, clusterInfo, metadata, routingTable); + } else if (shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { + return snapshotShardSizeInfo.getShardSize(shard, defaultValue); + } else { + return clusterInfo.getShardSize(shard, defaultValue); + } + } + + private static long getExpectedSizeOfResizedShard( + ShardRouting shard, + long defaultValue, + IndexMetadata indexMetadata, + ClusterInfo clusterInfo, + Metadata metadata, + RoutingTable routingTable + ) { + // in the shrink index case we sum up the source index shards since we basically make a copy of the shard in the worst case + long targetShardSize = 0; + final Index mergeSourceIndex = indexMetadata.getResizeSourceIndex(); + final IndexMetadata sourceIndexMetadata = metadata.index(mergeSourceIndex); + if (sourceIndexMetadata != null) { + final Set shardIds = IndexMetadata.selectRecoverFromShards( + shard.id(), + sourceIndexMetadata, + indexMetadata.getNumberOfShards() + ); + final IndexRoutingTable indexRoutingTable = routingTable.index(mergeSourceIndex.getName()); + for (int i = 0; i < indexRoutingTable.size(); i++) { + IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); + if (shardIds.contains(shardRoutingTable.shardId())) { + targetShardSize += clusterInfo.getShardSize(shardRoutingTable.primaryShard(), 0); + } + } + } + return targetShardSize == 0 ?
defaultValue : targetShardSize; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index bd15d924c9c19..8e257ff2c7a54 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -114,6 +114,7 @@ public class IndexShardRoutingTable { allShardsStarted = false; } } + assert primary != null || shards.isEmpty() : shards; this.primary = primary; this.replicas = CollectionUtils.wrapUnmodifiableOrEmptySingleton(replicas); this.activeShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(activeShards); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index 5f477a9ca66df..ea0ee630ef073 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.routing.allocation; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -16,6 +15,7 @@ import org.elasticsearch.common.unit.RelativeByteSizeValue; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import java.util.Iterator; import java.util.List; @@ -152,7 +152,11 @@ public class DiskThresholdSettings { private volatile TimeValue rerouteInterval; static { - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; // this check is unnecessary in v9 + checkAutoReleaseIndexEnabled(); + } + + @UpdateForV9 // this check is unnecessary in v9 + private static void checkAutoReleaseIndexEnabled() { final String AUTO_RELEASE_INDEX_ENABLED_KEY = "es.disk.auto_release_flood_stage_block"; final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY); if (property != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 8d336d2147e11..64f88ac1e2417 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -31,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; -import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; @@ -42,6 +41,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.shard.ShardId; @@ -57,6 +57,7 @@ 
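+// The call sites below switch from DiskThresholdDecider.getExpectedShardSize(...) to the statically imported
+// ExpectedShardSizeEstimator.getExpectedShardSize(...). A typical call, with unchanged semantics:
+//   long shardSize = getExpectedShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation);
+// Resize targets sum the source index's primary shard sizes, snapshot recoveries consult SnapshotShardSizeInfo,
+// and all other shards fall back to ClusterInfo.getShardSize(shard, defaultValue).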
import java.util.stream.StreamSupport; import static org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.REPLACE; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; @@ -150,6 +151,7 @@ public BalancedShardsAllocator(ClusterSettings clusterSettings, WriteLoadForecas * * Once {@link org.elasticsearch.Version#V_7_17_0} goes out of scope, start to properly reject such bad values. */ + @UpdateForV9 private static float ensureValidThreshold(float threshold) { if (1.0f <= threshold) { return threshold; @@ -1037,11 +1039,7 @@ private void allocateUnassigned() { logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); } - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation - ); + final long shardSize = getExpectedShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation); shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); minNode.addShard(shard); if (shard.primary() == false) { @@ -1064,11 +1062,7 @@ private void allocateUnassigned() { if (minNode != null) { // throttle decision scenario assert allocationDecision.getAllocationStatus() == AllocationStatus.DECIDERS_THROTTLED; - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation - ); + final long shardSize = getExpectedShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation); minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); } else { if (logger.isTraceEnabled()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java index eecd0a7410513..7cdffc3a5bf24 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ContinuousComputation.java @@ -10,12 +10,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.threadpool.ThreadPool; import java.util.Objects; -import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicReference; /** @@ -27,15 +27,15 @@ public abstract class ContinuousComputation { private static final Logger logger = LogManager.getLogger(ContinuousComputation.class); - private final ExecutorService executorService; + private final Executor executor; private final AtomicReference enqueuedInput = new AtomicReference<>(); private final Processor processor = new Processor(); /** - * @param threadPool Each computation runs on a {@code GENERIC} thread from this thread pool. At most one task executes at once. 
+ * @param executor the {@link Executor} with which to execute the computation */ - public ContinuousComputation(ThreadPool threadPool) { - this.executorService = threadPool.generic(); + public ContinuousComputation(Executor executor) { + this.executor = executor; } /** @@ -44,7 +44,7 @@ public ContinuousComputation(ThreadPool threadPool) { public void onNewInput(T input) { assert input != null; if (enqueuedInput.getAndSet(Objects.requireNonNull(input)) == null) { - executorService.execute(processor); + executor.execute(processor); } } @@ -74,6 +74,7 @@ private class Processor extends AbstractRunnable { @Override public void onFailure(Exception e) { + logger.error(Strings.format("unexpected error processing [%s]", ContinuousComputation.this), e); assert false : e; } @@ -85,14 +86,16 @@ public void onRejection(Exception e) { } @Override - protected void doRun() throws Exception { + protected void doRun() { final T input = enqueuedInput.get(); assert input != null; - processInput(input); - - if (enqueuedInput.compareAndSet(input, null) == false) { - executorService.execute(this); + try { + processInput(input); + } finally { + if (enqueuedInput.compareAndSet(input, null) == false) { + executor.execute(this); + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 7d24872cf51dc..60a6ec2e49899 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -51,6 +51,7 @@ public class DesiredBalanceComputer { private final ThreadPool threadPool; private final ShardsAllocator delegateAllocator; + // stats protected final MeanMetric iterations = new MeanMetric(); public static final Setting PROGRESS_LOG_INTERVAL_SETTING = Setting.timeSetting( diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 048ade3ef86c5..dc3cbfa8b5ae8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -30,6 +29,10 @@ import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.telemetry.metric.DoubleGauge; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongGaugeMetric; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; import java.util.Comparator; @@ -40,6 +43,7 @@ import java.util.stream.IntStream; import static 
org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.REPLACE; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; /** * Given the current allocation of shards and the desired balance, performs the next (legal) shard movements towards the goal. @@ -69,13 +73,57 @@ public class DesiredBalanceReconciler { private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); - public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool) { + // stats + /** + * Number of unassigned shards during last reconciliation + */ + protected final LongGaugeMetric unassignedShards; + /** + * Total number of assigned shards during last reconciliation + */ + protected final LongGaugeMetric totalAllocations; + /** + * Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved + */ + protected final LongGaugeMetric undesiredAllocations; + private final DoubleGauge undesiredAllocationsRatio; + + public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, MeterRegistry meterRegistry) { this.undesiredAllocationLogInterval = new FrequencyCappedAction(threadPool); clusterSettings.initializeAndWatch(UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING, this.undesiredAllocationLogInterval::setMinInterval); clusterSettings.initializeAndWatch( UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, value -> this.undesiredAllocationsLogThreshold = value ); + + unassignedShards = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.shards.unassigned", + "Unassigned shards count", + "{shard}" + ); + totalAllocations = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.shards.count", + "Total shards count", + "{shard}" + ); + undesiredAllocations = LongGaugeMetric.create( + meterRegistry, + "es.allocator.desired_balance.allocations.undesired", + "Count of shards allocated on undesired nodes", + "{shard}" + ); + undesiredAllocationsRatio = meterRegistry.registerDoubleGauge( + "es.allocator.desired_balance.allocations.undesired_ratio", + "Ratio of undesired allocations to shard count", + "1", + () -> { + var total = totalAllocations.get(); + var undesired = undesiredAllocations.get(); + return new DoubleWithAttributes(total != 0 ? 
(double) undesired / total : 0.0); + } + ); } public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { @@ -271,14 +319,7 @@ private void allocateUnassigned() { switch (decision.type()) { case YES -> { logger.debug("Assigning shard [{}] to {} [{}]", shard, nodeIdsIterator.source, nodeId); - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); + long shardSize = getExpectedShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation); routingNodes.initializeShard(shard, nodeId, null, shardSize, allocation.changes()); allocationOrdering.recordAllocation(nodeId); if (shard.primary() == false) { @@ -452,8 +493,9 @@ private void balance() { return; } - long allAllocations = 0; - long undesiredAllocations = 0; + int unassignedShards = routingNodes.unassigned().size() + routingNodes.unassigned().ignored().size(); + int totalAllocations = 0; + int undesiredAllocations = 0; // Iterate over all started shards and try to move any which are on undesired nodes. In the presence of throttling shard // movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the @@ -461,7 +503,7 @@ private void balance() { for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) { final var shardRouting = iterator.next(); - allAllocations++; + totalAllocations++; if (shardRouting.started() == false) { // can only rebalance started shards @@ -511,10 +553,14 @@ private void balance() { } } - maybeLogUndesiredAllocationsWarning(allAllocations, undesiredAllocations, routingNodes.size()); + DesiredBalanceReconciler.this.unassignedShards.set(unassignedShards); + DesiredBalanceReconciler.this.undesiredAllocations.set(undesiredAllocations); + DesiredBalanceReconciler.this.totalAllocations.set(totalAllocations); + + maybeLogUndesiredAllocationsWarning(totalAllocations, undesiredAllocations, routingNodes.size()); } - private void maybeLogUndesiredAllocationsWarning(long allAllocations, long undesiredAllocations, int nodeCount) { + private void maybeLogUndesiredAllocationsWarning(int allAllocations, int undesiredAllocations, int nodeCount) { // more shards than cluster can relocate with one reroute final boolean nonEmptyRelocationBacklog = undesiredAllocations > 2L * nodeCount; final boolean warningThresholdReached = undesiredAllocations > undesiredAllocationsLogThreshold * allAllocations; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index ee95074b8a730..64f1eb704a2f3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -77,14 +78,16 @@ public DesiredBalanceShardsAllocator( ShardsAllocator delegateAllocator, 
ThreadPool threadPool, ClusterService clusterService, - DesiredBalanceReconcilerAction reconciler + DesiredBalanceReconcilerAction reconciler, + TelemetryProvider telemetryProvider ) { this( delegateAllocator, threadPool, clusterService, new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), - reconciler + reconciler, + telemetryProvider ); } @@ -93,14 +96,19 @@ public DesiredBalanceShardsAllocator( ThreadPool threadPool, ClusterService clusterService, DesiredBalanceComputer desiredBalanceComputer, - DesiredBalanceReconcilerAction reconciler + DesiredBalanceReconcilerAction reconciler, + TelemetryProvider telemetryProvider ) { this.delegateAllocator = delegateAllocator; this.threadPool = threadPool; this.reconciler = reconciler; this.desiredBalanceComputer = desiredBalanceComputer; - this.desiredBalanceReconciler = new DesiredBalanceReconciler(clusterService.getClusterSettings(), threadPool); - this.desiredBalanceComputation = new ContinuousComputation<>(threadPool) { + this.desiredBalanceReconciler = new DesiredBalanceReconciler( + clusterService.getClusterSettings(), + threadPool, + telemetryProvider.getMeterRegistry() + ); + this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { @Override protected void processInput(DesiredBalanceInput desiredBalanceInput) { @@ -141,7 +149,7 @@ private DesiredBalance getInitialDesiredBalance() { @Override public String toString() { - return "DesiredBalanceShardsAllocator#updateDesiredBalanceAndReroute"; + return "DesiredBalanceShardsAllocator#allocate"; } }; this.queue = new PendingListenersQueue(); @@ -264,7 +272,7 @@ public void resetDesiredBalance() { public DesiredBalanceStats getStats() { return new DesiredBalanceStats( - currentDesiredBalance.lastConvergedIndex(), + Math.max(currentDesiredBalance.lastConvergedIndex(), 0L), desiredBalanceComputation.isActive(), computationsSubmitted.count(), computationsExecuted.count(), @@ -272,7 +280,10 @@ public DesiredBalanceStats getStats() { desiredBalanceComputer.iterations.sum(), computedShardMovements.sum(), cumulativeComputationTime.count(), - cumulativeReconciliationTime.count() + cumulativeReconciliationTime.count(), + desiredBalanceReconciler.unassignedShards.get(), + desiredBalanceReconciler.totalAllocations.get(), + desiredBalanceReconciler.undesiredAllocations.get() ); } @@ -282,6 +293,10 @@ private void onNoLongerMaster() { queue.completeAllAsNotMaster(); pendingDesiredBalanceMoves.clear(); desiredBalanceReconciler.clear(); + + desiredBalanceReconciler.unassignedShards.set(0); + desiredBalanceReconciler.totalAllocations.set(0); + desiredBalanceReconciler.undesiredAllocations.set(0); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java index c017d77362427..8a95b947735f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java @@ -19,6 +19,8 @@ import java.io.IOException; +import static org.elasticsearch.TransportVersions.ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS; + public record DesiredBalanceStats( long lastConvergedIndex, boolean computationActive, @@ -28,11 +30,21 @@ public record DesiredBalanceStats( long computationIterations, long computedShardMovements, long cumulativeComputationTime, - long 
cumulativeReconciliationTime + long cumulativeReconciliationTime, + long unassignedShards, + long totalAllocations, + long undesiredAllocations ) implements Writeable, ToXContentObject { private static final TransportVersion COMPUTED_SHARD_MOVEMENTS_VERSION = TransportVersions.V_8_8_0; + public DesiredBalanceStats { + if (lastConvergedIndex < 0) { + assert false : lastConvergedIndex; + throw new IllegalStateException("lastConvergedIndex must be nonnegative, but got [" + lastConvergedIndex + ']'); + } + } + public static DesiredBalanceStats readFrom(StreamInput in) throws IOException { return new DesiredBalanceStats( in.readVLong(), @@ -43,7 +55,10 @@ public static DesiredBalanceStats readFrom(StreamInput in) throws IOException { in.readVLong(), in.getTransportVersion().onOrAfter(COMPUTED_SHARD_MOVEMENTS_VERSION) ? in.readVLong() : -1, in.readVLong(), - in.readVLong() + in.readVLong(), + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? in.readVLong() : -1, + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? in.readVLong() : -1, + in.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS) ? in.readVLong() : -1 ); } @@ -60,6 +75,11 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeVLong(cumulativeComputationTime); out.writeVLong(cumulativeReconciliationTime); + if (out.getTransportVersion().onOrAfter(ADDITIONAL_DESIRED_BALANCE_RECONCILIATION_STATS)) { + out.writeVLong(unassignedShards); + out.writeVLong(totalAllocations); + out.writeVLong(undesiredAllocations); + } } @Override @@ -74,7 +94,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("computed_shard_movements", computedShardMovements); builder.humanReadableField("computation_time_in_millis", "computation_time", new TimeValue(cumulativeComputationTime)); builder.humanReadableField("reconciliation_time_in_millis", "reconciliation_time", new TimeValue(cumulativeReconciliationTime)); + builder.field("unassigned_shards", unassignedShards); + builder.field("total_allocations", totalAllocations); + builder.field("undesired_allocations", undesiredAllocations); + builder.field("undesired_allocations_ratio", undesiredAllocationsRatio()); builder.endObject(); return builder; } + + public double undesiredAllocationsRatio() { + if (unassignedShards == -1 || totalAllocations == -1 || undesiredAllocations == -1) { + return -1.0; + } else if (totalAllocations == 0) { + return 0.0; + } else { + return (double) undesiredAllocations / totalAllocations; + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index d916aa7638786..0e0d15a02d042 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -10,13 +10,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import 
org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingTable; @@ -29,12 +26,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.snapshots.SnapshotShardSizeInfo; +import org.elasticsearch.core.UpdateForV9; import java.util.Map; -import java.util.Set; + +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; /** * The {@link DiskThresholdDecider} checks that the node a shard is potentially @@ -73,6 +69,7 @@ public class DiskThresholdDecider extends AllocationDecider { public static final String NAME = "disk_threshold"; + @UpdateForV9 public static final Setting ENABLE_FOR_SINGLE_DATA_NODE = Setting.boolSetting( "cluster.routing.allocation.disk.watermark.enable_for_single_data_node", true, @@ -102,7 +99,6 @@ public void validate(Boolean value) { public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) { this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); - assert Version.CURRENT.major < 9 : "remove enable_for_single_data_node in 9"; // get deprecation warnings. boolean enabledForSingleDataNode = ENABLE_FOR_SINGLE_DATA_NODE.get(settings); assert enabledForSingleDataNode; @@ -541,61 +537,6 @@ private Decision earlyTerminate(Map usages) { return null; } - public static long getExpectedShardSize(ShardRouting shardRouting, long defaultSize, RoutingAllocation allocation) { - return DiskThresholdDecider.getExpectedShardSize( - shardRouting, - defaultSize, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - } - - /** - * Returns the expected shard size for the given shard or the default value provided if not enough information are available - * to estimate the shards size. - */ - public static long getExpectedShardSize( - ShardRouting shard, - long defaultValue, - ClusterInfo clusterInfo, - SnapshotShardSizeInfo snapshotShardSizeInfo, - Metadata metadata, - RoutingTable routingTable - ) { - final IndexMetadata indexMetadata = metadata.getIndexSafe(shard.index()); - if (indexMetadata.getResizeSourceIndex() != null - && shard.active() == false - && shard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { - // in the shrink index case we sum up the source index shards since we basically make a copy of the shard in - // the worst case - long targetShardSize = 0; - final Index mergeSourceIndex = indexMetadata.getResizeSourceIndex(); - final IndexMetadata sourceIndexMeta = metadata.index(mergeSourceIndex); - if (sourceIndexMeta != null) { - final Set shardIds = IndexMetadata.selectRecoverFromShards( - shard.id(), - sourceIndexMeta, - indexMetadata.getNumberOfShards() - ); - final IndexRoutingTable indexRoutingTable = routingTable.index(mergeSourceIndex.getName()); - for (int i = 0; i < indexRoutingTable.size(); i++) { - IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); - if (shardIds.contains(shardRoutingTable.shardId())) { - targetShardSize += clusterInfo.getShardSize(shardRoutingTable.primaryShard(), 0); - } - } - } - return targetShardSize == 0 ? 
defaultValue : targetShardSize; - } else { - if (shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { - return snapshotShardSizeInfo.getShardSize(shard, defaultValue); - } - return clusterInfo.getShardSize(shard, defaultValue); - } - } - record DiskUsageWithRelocations(DiskUsage diskUsage, long relocatingShardSize) { double getFreeDiskAsPercentage() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index be04022685b85..b562ba8e9482d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -12,7 +12,8 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.index.shard.ShardId; + +import java.util.Objects; /** * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that @@ -65,26 +66,52 @@ private static Decision canMove(ShardRouting shardRouting, RoutingAllocation all return YES_NOT_RUNNING; } - final ShardId shardId = shardRouting.shardId(); - return snapshotsInProgress.asStream() - .filter(entry -> entry.hasShardsInInitState() && entry.isClone() == false) - .map(entry -> entry.shards().get(shardId)) - .filter( - shardSnapshotStatus -> shardSnapshotStatus != null - && shardSnapshotStatus.state().completed() == false - && shardSnapshotStatus.nodeId() != null - && shardSnapshotStatus.nodeId().equals(shardRouting.currentNodeId()) - ) - .findAny() - .map( - shardSnapshotStatus -> allocation.decision( + if (shardRouting.currentNodeId() == null) { + // Shard is not assigned to a node + return YES_NOT_SNAPSHOTTED; + } + + for (final var entriesByRepo : snapshotsInProgress.entriesByRepo()) { + for (final var entry : entriesByRepo) { + if (entry.isClone()) { + // clones do not run on data nodes + continue; + } + + if (entry.hasShardsInInitState() == false) { + // this snapshot has no running shard snapshots + // (NB this means we let ABORTED shards move without waiting for them to complete) + continue; + } + + final var shardSnapshotStatus = entry.shards().get(shardRouting.shardId()); + + if (shardSnapshotStatus == null) { + // this snapshot is not snapshotting the shard to allocate + continue; + } + + if (shardSnapshotStatus.state().completed()) { + // this shard snapshot is complete + continue; + } + + if (Objects.equals(shardRouting.currentNodeId(), shardSnapshotStatus.nodeId()) == false) { + // this shard snapshot is allocated to a different node + continue; + } + + return allocation.decision( Decision.THROTTLE, NAME, - "waiting for snapshotting of shard [%s] to complete on this node [%s]", - shardId, - shardSnapshotStatus.nodeId() - ) - ) - .orElse(YES_NOT_SNAPSHOTTED); + "waiting for snapshot [%s] of shard [%s] to complete on node [%s]", + entry.snapshot(), + shardRouting.shardId(), + shardRouting.currentNodeId() + ); + } + } + + return YES_NOT_SNAPSHOTTED; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java index 76ca9f88b4b58..74da033fd8811 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java @@ -249,7 +249,7 @@ static void updateShardAllocationStatus( ); public static final String ENABLE_TIER_ACTION_GUIDE = "https://ela.st/enable-tier"; - public static final Map ACTION_ENABLE_TIERS_LOOKUP = DataTier.ALL_DATA_TIERS.stream() + private static final Map ACTION_ENABLE_TIERS_LOOKUP = DataTier.ALL_DATA_TIERS.stream() .collect( Collectors.toUnmodifiableMap( tier -> tier, @@ -276,7 +276,7 @@ static void updateShardAllocationStatus( INCREASE_SHARD_LIMIT_ACTION_GUIDE ); - public static final Map ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS + private static final Map ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS .stream() .collect( Collectors.toUnmodifiableMap( @@ -307,7 +307,7 @@ static void updateShardAllocationStatus( INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE ); - public static final Map ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS + private static final Map ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS .stream() .collect( Collectors.toUnmodifiableMap( @@ -405,6 +405,7 @@ static void updateShardAllocationStatus( TIER_CAPACITY_ACTION_GUIDE ); + // Visible for testing public static final Map ACTION_INCREASE_TIER_CAPACITY_LOOKUP = DataTier.ALL_DATA_TIERS.stream() .collect( Collectors.toUnmodifiableMap( @@ -622,11 +623,11 @@ List diagnoseAllocationResults( ClusterState state, List nodeAllocationResults ) { - IndexMetadata index = state.metadata().index(shardRouting.index()); + IndexMetadata indexMetadata = state.metadata().index(shardRouting.index()); List diagnosisDefs = new ArrayList<>(); - if (index != null) { - diagnosisDefs.addAll(checkIsAllocationDisabled(index, nodeAllocationResults)); - diagnosisDefs.addAll(checkDataTierRelatedIssues(index, nodeAllocationResults, state)); + if (indexMetadata != null) { + diagnosisDefs.addAll(checkIsAllocationDisabled(indexMetadata, nodeAllocationResults)); + diagnosisDefs.addAll(checkNodeRoleRelatedIssues(indexMetadata, nodeAllocationResults, state, shardRouting)); } if (diagnosisDefs.isEmpty()) { diagnosisDefs.add(ACTION_CHECK_ALLOCATION_EXPLAIN_API); @@ -640,7 +641,7 @@ List diagnoseAllocationResults( * @param outcome The outcome expected * @return A predicate that returns true if the decision exists and matches the expected outcome, false otherwise. */ - private static Predicate hasDeciderResult(String deciderName, Decision.Type outcome) { + protected static Predicate hasDeciderResult(String deciderName, Decision.Type outcome) { return (nodeResult) -> { Decision decision = nodeResult.getCanAllocateDecision(); return decision != null && decision.getDecisions().stream().anyMatch(d -> deciderName.equals(d.label()) && outcome == d.type()); @@ -676,26 +677,29 @@ List checkIsAllocationDisabled(IndexMetadata indexMetadata } /** - * Generates a list of diagnoses for common problems that keep a shard from allocating to nodes in a data tier. + * Generates a list of diagnoses for common problems that keep a shard from allocating to nodes depending on their role; + * a very common example of such a case is data tiers.
* @param indexMetadata Index metadata for the shard being diagnosed. * @param nodeAllocationResults allocation decision results for all nodes in the cluster. * @param clusterState the current cluster state. + * @param shardRouting the shard the nodeAllocationResults refer to * @return A list of diagnoses for the provided unassigned shard */ - public List checkDataTierRelatedIssues( + protected List checkNodeRoleRelatedIssues( IndexMetadata indexMetadata, List nodeAllocationResults, - ClusterState clusterState + ClusterState clusterState, + ShardRouting shardRouting ) { List diagnosisDefs = new ArrayList<>(); - if (indexMetadata.getTierPreference().size() > 0) { + if (indexMetadata.getTierPreference().isEmpty() == false) { List dataTierAllocationResults = nodeAllocationResults.stream() .filter(hasDeciderResult(DATA_TIER_ALLOCATION_DECIDER_NAME, Decision.Type.YES)) .toList(); if (dataTierAllocationResults.isEmpty()) { // Shard must be allocated on specific tiers but no nodes were enabled for those tiers. for (String tier : indexMetadata.getTierPreference()) { - Optional.ofNullable(ACTION_ENABLE_TIERS_LOOKUP.get(tier)).ifPresent(diagnosisDefs::add); + Optional.ofNullable(getAddNodesWithRoleAction(tier)).ifPresent(diagnosisDefs::add); } } else { // Collect the nodes from the tiers this index is allowed on @@ -719,29 +723,29 @@ public List checkDataTierRelatedIssues( // Run checks for data tier specific problems diagnosisDefs.addAll( - checkDataTierAtShardLimit(indexMetadata, clusterState, dataTierAllocationResults, dataTierNodes, preferredTier) + checkNodesWithRoleAtShardLimit(indexMetadata, clusterState, dataTierAllocationResults, dataTierNodes, preferredTier) ); diagnosisDefs.addAll(checkDataTierShouldMigrate(indexMetadata, dataTierAllocationResults, preferredTier, dataTierNodes)); - checkNotEnoughNodesInDataTier(dataTierAllocationResults, preferredTier).ifPresent(diagnosisDefs::add); + checkNotEnoughNodesWithRole(dataTierAllocationResults, preferredTier).ifPresent(diagnosisDefs::add); } } return diagnosisDefs; } - private List checkDataTierAtShardLimit( + protected List checkNodesWithRoleAtShardLimit( IndexMetadata indexMetadata, ClusterState clusterState, - List dataTierAllocationResults, - Set dataTierNodes, - @Nullable String preferredTier + List nodeRoleAllocationResults, + Set nodesWithRoles, + @Nullable String role ) { - // All tier nodes at shards limit? - if (dataTierAllocationResults.stream().allMatch(hasDeciderResult(ShardsLimitAllocationDecider.NAME, Decision.Type.NO))) { + // All applicable nodes at shards limit? 
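The role-based rework keeps hasDeciderResult as the workhorse: a decider name plus an expected outcome become a Predicate over per-node allocation results, which checkNodeRoleRelatedIssues uses to find the nodes the data-tier decider approved. A rough sketch of that filtering idiom, with toy records standing in for NodeAllocationResult and Decision:

import java.util.List;
import java.util.function.Predicate;

public class DeciderFilterSketch {
    enum Type { YES, NO, THROTTLE }
    record Decision(String label, Type type) {}
    record NodeResult(String nodeId, List<Decision> decisions) {}

    // Mirrors hasDeciderResult(...): true if the named decider produced the expected outcome.
    static Predicate<NodeResult> hasDeciderResult(String deciderName, Type outcome) {
        return node -> node.decisions().stream().anyMatch(d -> deciderName.equals(d.label()) && d.type() == outcome);
    }

    public static void main(String[] args) {
        var results = List.of(
            new NodeResult("node-1", List.of(new Decision("data_tier", Type.YES))),
            new NodeResult("node-2", List.of(new Decision("data_tier", Type.NO)))
        );
        // Keep only the nodes the data-tier decider said YES to.
        System.out.println(results.stream().filter(hasDeciderResult("data_tier", Type.YES)).toList());
    }
}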
+ if (nodeRoleAllocationResults.stream().allMatch(hasDeciderResult(ShardsLimitAllocationDecider.NAME, Decision.Type.NO))) { List diagnosisDefs = new ArrayList<>(); - // We need the routing nodes for the tiers this index is allowed on to determine the offending shard limits - List dataTierRoutingNodes = clusterState.getRoutingNodes() + // We need the routing nodes for the role this index is allowed on to determine the offending shard limits + List candidateNodes = clusterState.getRoutingNodes() .stream() - .filter(routingNode -> dataTierNodes.contains(routingNode.node())) + .filter(routingNode -> nodesWithRoles.contains(routingNode.node())) .toList(); // Determine which total_shards_per_node settings are present @@ -752,34 +756,29 @@ private List checkDataTierAtShardLimit( // Determine which total_shards_per_node settings are keeping things from allocating boolean clusterShardsPerNodeShouldChange = false; if (clusterShardsPerNode > 0) { - int minShardCountInTier = dataTierRoutingNodes.stream() - .map(RoutingNode::numberOfOwningShards) - .min(Integer::compareTo) - .orElse(-1); - clusterShardsPerNodeShouldChange = minShardCountInTier >= clusterShardsPerNode; + int minShardCount = candidateNodes.stream().map(RoutingNode::numberOfOwningShards).min(Integer::compareTo).orElse(-1); + clusterShardsPerNodeShouldChange = minShardCount >= clusterShardsPerNode; } boolean indexShardsPerNodeShouldChange = false; if (indexShardsPerNode > 0) { - int minShardCountInTier = dataTierRoutingNodes.stream() + int minShardCount = candidateNodes.stream() .map(routingNode -> routingNode.numberOfOwningShardsForIndex(indexMetadata.getIndex())) .min(Integer::compareTo) .orElse(-1); - indexShardsPerNodeShouldChange = minShardCountInTier >= indexShardsPerNode; + indexShardsPerNodeShouldChange = minShardCount >= indexShardsPerNode; } // Add appropriate diagnosis - if (preferredTier != null) { - // We cannot allocate the shard to the most preferred tier because a shard limit is reached. + if (role != null) { + // We cannot allocate the shard to the most preferred role because a shard limit is reached. if (clusterShardsPerNodeShouldChange) { - Optional.ofNullable(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(preferredTier)) - .ifPresent(diagnosisDefs::add); + Optional.ofNullable(getIncreaseShardLimitClusterSettingAction(role)).ifPresent(diagnosisDefs::add); } if (indexShardsPerNodeShouldChange) { - Optional.ofNullable(ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(preferredTier)).ifPresent(diagnosisDefs::add); + Optional.ofNullable(getIncreaseShardLimitIndexSettingAction(role)).ifPresent(diagnosisDefs::add); } } else { - // We couldn't determine a desired tier. This is likely because there are no tiers in the cluster, - // only `data` nodes. Give a generic ask for increasing the shard limit. + // We couldn't determine a desired role. Give a generic ask for increasing the shard limit. if (clusterShardsPerNodeShouldChange) { diagnosisDefs.add(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING); } @@ -838,16 +837,16 @@ private static List checkDataTierShouldMigrate( } } - private static Optional checkNotEnoughNodesInDataTier( - List dataTierAllocationResults, - @Nullable String preferredTier + protected Optional checkNotEnoughNodesWithRole( + List nodeAllocationResults, + @Nullable String role ) { - // Not enough tier nodes to hold shards on different nodes? 
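The shard-limit check in the hunk above only suggests raising a total_shards_per_node setting when even the least-loaded candidate node has reached the limit; otherwise some node still has room and the limit is not the blocker. A compact sketch of that min-over-nodes comparison, using a hypothetical Node record:

import java.util.List;

public class ShardLimitSketch {
    record Node(String id, int owningShards) {}

    // True only if every candidate node is at or over the per-node shard limit.
    static boolean allNodesAtShardLimit(List<Node> candidates, int shardsPerNodeLimit) {
        if (shardsPerNodeLimit <= 0) {
            return false; // non-positive means the limit is not configured
        }
        int minShardCount = candidates.stream().mapToInt(Node::owningShards).min().orElse(-1);
        return minShardCount >= shardsPerNodeLimit;
    }

    public static void main(String[] args) {
        System.out.println(allNodesAtShardLimit(List.of(new Node("a", 100), new Node("b", 100)), 100)); // true
        System.out.println(allNodesAtShardLimit(List.of(new Node("a", 100), new Node("b", 42)), 100));  // false
    }
}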
- if (dataTierAllocationResults.stream().allMatch(hasDeciderResult(SameShardAllocationDecider.NAME, Decision.Type.NO))) { - // We couldn't determine a desired tier. This is likely because there are no tiers in the cluster, - // only `data` nodes. Give a generic ask for increasing the shard limit. - if (preferredTier != null) { - return Optional.ofNullable(ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(preferredTier)); + // Not enough nodes to hold shards on different nodes? + if (nodeAllocationResults.stream().allMatch(hasDeciderResult(SameShardAllocationDecider.NAME, Decision.Type.NO))) { + // We couldn't determine a desired role. This is likely because there are no nodes with the relevant role in the cluster. + // Give a generic ask for increasing the shard limit. + if (role != null) { + return Optional.ofNullable(getIncreaseNodeWithRoleCapacityAction(role)); } else { return Optional.of(ACTION_INCREASE_NODE_CAPACITY); } @@ -856,6 +855,26 @@ private static Optional checkNotEnoughNodesInDataTier( } } + @Nullable + public Diagnosis.Definition getAddNodesWithRoleAction(String role) { + return ACTION_ENABLE_TIERS_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseShardLimitIndexSettingAction(String role) { + return ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseShardLimitClusterSettingAction(String role) { + return ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseNodeWithRoleCapacityAction(String role) { + return ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(role); + } + public class ShardAllocationStatus { protected final ShardAllocationCounts primaries = new ShardAllocationCounts(); protected final ShardAllocationCounts replicas = new ShardAllocationCounts(); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java new file mode 100644 index 0000000000000..bfecc577f7a47 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.service; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +public class TransportFeatures implements FeatureSpecification { + @Override + public Map<NodeFeature, Version> getHistoricalFeatures() { + // transport version was introduced in 8.8.0, but we need to wait until all nodes are >8.8.0 + // to properly detect when we need to fix transport versions + return Map.of(TransportVersionsFixupListener.FIX_TRANSPORT_VERSION, Version.V_8_8_1); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index a54130aec95b6..e77d44e5ad71e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -26,6 +25,9 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.threadpool.Scheduler; @@ -47,10 +49,13 @@ * due to the master node not understanding cluster state with transport versions added in 8.8.0. * Any nodes with the inferred placeholder cluster state are then refreshed with their actual transport version */ +@UpdateForV9 // this can be removed in v9 public class TransportVersionsFixupListener implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(TransportVersionsFixupListener.class); + static final NodeFeature FIX_TRANSPORT_VERSION = new NodeFeature("transport.fix_transport_version"); + private static final TimeValue RETRY_TIME = TimeValue.timeValueSeconds(30); private final MasterServiceTaskQueue<NodeTransportVersionTask> taskQueue; @@ -58,13 +63,20 @@ public class TransportVersionsFixupListener implements ClusterStateListener { private final Scheduler scheduler; private final Executor executor; private final Set<String> pendingNodes = Collections.synchronizedSet(new HashSet<>()); + private final FeatureService featureService; - public TransportVersionsFixupListener(ClusterService service, ClusterAdminClient client, ThreadPool threadPool) { + public TransportVersionsFixupListener( + ClusterService service, + ClusterAdminClient client, + FeatureService featureService, + ThreadPool threadPool + ) { // there tends to be a lot of state operations on an upgrade - this one is not time-critical, // so use LOW priority. It just needs to be run at some point after upgrade.
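The FIX_TRANSPORT_VERSION node feature introduced above lets the listener, in the clusterChanged hunk just below, ask the feature service whether the whole cluster supports the fixup instead of comparing the minimum node version against 8.8.0 by hand. The following toy model shows what such a historical-feature check reduces to; versions are plain ints here rather than the real Version/NodeFeature types, and the map is assumed to already be cumulative:

import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;

public class HistoricalFeatureSketch {
    // Version id -> all historical features present at or before that version.
    static final NavigableMap<Integer, Set<String>> HISTORICAL = new TreeMap<>(
        Map.of(8_08_01, Set.of("transport.fix_transport_version"))
    );

    // A cluster has a historical feature iff its minimum node version is at or
    // after the version that introduced the feature: one floorEntry lookup.
    static boolean clusterHasFeature(int minNodeVersion, String feature) {
        var entry = HISTORICAL.floorEntry(minNodeVersion);
        return entry != null && entry.getValue().contains(feature);
    }

    public static void main(String[] args) {
        System.out.println(clusterHasFeature(8_08_00, "transport.fix_transport_version")); // false: a node is still on 8.8.0
        System.out.println(clusterHasFeature(8_11_00, "transport.fix_transport_version")); // true: all nodes are past 8.8.1
    }
}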
this( service.createTaskQueue("fixup-transport-versions", Priority.LOW, new TransportVersionUpdater()), client, + featureService, threadPool, threadPool.executor(ThreadPool.Names.CLUSTER_COORDINATION) ); @@ -73,11 +85,13 @@ public TransportVersionsFixupListener(ClusterService service, ClusterAdminClient TransportVersionsFixupListener( MasterServiceTaskQueue taskQueue, ClusterAdminClient client, + FeatureService featureService, Scheduler scheduler, Executor executor ) { this.taskQueue = taskQueue; this.client = client; + this.featureService = featureService; this.scheduler = scheduler; this.executor = executor; } @@ -139,7 +153,7 @@ public void clusterChanged(ClusterChangedEvent event) { // if the min node version > 8.8.0, and the cluster state has some transport versions == 8.8.0, // then refresh all inferred transport versions to their real versions // now that everything should understand cluster state with transport versions - if (event.state().nodes().getMinNodeVersion().after(Version.V_8_8_0) + if (featureService.clusterHasFeature(event.state(), FIX_TRANSPORT_VERSION) && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { // find all the relevant nodes diff --git a/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java b/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java index b7afe8211184a..a12071f9c27e3 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; +import org.elasticsearch.telemetry.metric.LongCounter; import java.util.concurrent.atomic.AtomicLong; @@ -29,17 +30,25 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { private final Logger logger; private final HierarchyCircuitBreakerService parent; private final String name; + private final LongCounter trippedCountMeter; /** * Create a circuit breaker that will break if the number of estimated * bytes grows above the limit. All estimations will be multiplied by * the given overheadConstant. Uses the given oldBreaker to initialize * the starting offset. 
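With the new constructor parameter, ChildMemoryCircuitBreaker reports trips twice: once through its existing AtomicLong for the stats APIs, and once through the injected telemetry LongCounter. A minimal sketch of that double accounting, with a local stand-in for the telemetry interface:

import java.util.concurrent.atomic.AtomicLong;

public class BreakerMeterSketch {
    // Stand-in for org.elasticsearch.telemetry.metric.LongCounter.
    interface LongCounter { void increment(); }

    static final class Breaker {
        private final AtomicLong trippedCount = new AtomicLong(); // legacy stats counter
        private final LongCounter trippedCountMeter;              // telemetry counter

        Breaker(LongCounter trippedCountMeter) { this.trippedCountMeter = trippedCountMeter; }

        void circuitBreak(String field, long bytesNeeded) {
            // Both counters move together, as in circuitBreak(...) above.
            trippedCount.incrementAndGet();
            trippedCountMeter.increment();
            throw new IllegalStateException("[" + field + "] data too large: " + bytesNeeded + " bytes");
        }
    }

    public static void main(String[] args) {
        Breaker breaker = new Breaker(() -> System.out.println("metric: breaker tripped"));
        try {
            breaker.circuitBreak("request", 1024);
        } catch (IllegalStateException expected) {
            // the trip was recorded on both counters before the exception propagated
        }
    }
}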
+ * @param trippedCountMeter the counter used to report the tripped count metric * @param settings settings to configure this breaker * @param parent parent circuit breaker service to delegate tripped breakers to * @param name the name of the breaker */ - public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger, HierarchyCircuitBreakerService parent, String name) { + public ChildMemoryCircuitBreaker( + LongCounter trippedCountMeter, + BreakerSettings settings, + Logger logger, + HierarchyCircuitBreakerService parent, + String name + ) { this.name = name; this.limitAndOverhead = new LimitAndOverhead(settings.getLimit(), settings.getOverhead()); this.durability = settings.getDurability(); @@ -48,6 +57,7 @@ public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger, Hierar this.logger = logger; logger.trace(() -> format("creating ChildCircuitBreaker with settings %s", settings)); this.parent = parent; + this.trippedCountMeter = trippedCountMeter; } /** @@ -58,6 +68,7 @@ public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger, Hierar public void circuitBreak(String fieldName, long bytesNeeded) { final long memoryBytesLimit = this.limitAndOverhead.limit; this.trippedCount.incrementAndGet(); + this.trippedCountMeter.increment(); final String message = "[" + this.name + "] Data too large, data for [" diff --git a/server/src/main/java/org/elasticsearch/common/cache/Cache.java b/server/src/main/java/org/elasticsearch/common/cache/Cache.java index 7cd6fa471040a..98a4b90da73d5 100644 --- a/server/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/server/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -102,8 +102,8 @@ void setExpireAfterAccessNanos(long expireAfterAccessNanos) { this.entriesExpireAfterAccess = true; } - // pkg-private for testing - long getExpireAfterAccessNanos() { + // public for testing + public long getExpireAfterAccessNanos() { return this.expireAfterAccessNanos; } diff --git a/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java index 6b702f41e7c5d..60e6fa5fff22a 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/ModulesBuilder.java @@ -22,6 +22,11 @@ public ModulesBuilder add(Module... 
newModules) { return this; } + public <T> T bindToInstance(Class<T> cls, T instance) { + modules.add(b -> b.bind(cls).toInstance(instance)); + return instance; + } + @Override public Iterator<Module> iterator() { return modules.iterator(); } diff --git a/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java b/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java index 3e184c41ef006..b0d1ec931b0be 100644 --- a/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java +++ b/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java @@ -83,7 +83,7 @@ static <C extends CloseableChannel> void closeChannels(List<C> channels, boolean if (blocking) { ArrayList<PlainActionFuture<Void>> futures = new ArrayList<>(channels.size()); for (final C channel : channels) { - PlainActionFuture<Void> closeFuture = PlainActionFuture.newFuture(); + PlainActionFuture<Void> closeFuture = new PlainActionFuture<>(); channel.addCloseListener(closeFuture); futures.add(closeFuture); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 69e61e7e70001..8e469973c0f08 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -80,6 +80,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -425,6 +426,7 @@ public void apply(Settings value, Settings current, Settings previous) { ScriptService.CONTEXTS_ALLOWED_SETTING, IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING, IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, + IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_EXPIRE, IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, HunspellService.HUNSPELL_LAZY_LOAD, @@ -577,6 +579,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesClusterStateService.SHARD_LOCK_RETRY_TIMEOUT_SETTING, IngestSettings.GROK_WATCHDOG_INTERVAL, IngestSettings.GROK_WATCHDOG_MAX_EXECUTION_TIME, - TDigestExecutionHint.SETTING + TDigestExecutionHint.SETTING, + MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING, + MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING ).filter(Objects::nonNull).collect(Collectors.toSet()); } diff --git a/server/src/main/java/org/elasticsearch/common/unit/Processors.java b/server/src/main/java/org/elasticsearch/common/unit/Processors.java index c71005678fc0d..481ced3bb31d6 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Processors.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Processors.java @@ -56,7 +56,7 @@ public static Processors of(Double count) { } if (validNumberOfProcessors(count) == false) { - throw new IllegalArgumentException("processors must be a non-negative number; provided [" + count + "]"); + throw new IllegalArgumentException("processors must be a positive number; provided [" + count + "]"); } return new Processors(count); diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 72a2fc41a9a12..2c623882afe14 100644 ---
a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -64,6 +64,10 @@ public byte set(long index, byte value) { @Override public boolean get(long index, int len, BytesRef ref) { assert index + len <= size(); + if (len == 0) { + ref.length = 0; + return false; + } int pageIndex = pageIndex(index); final int indexInPage = indexInPage(index); if (indexInPage + len <= pageSize()) { diff --git a/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java b/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java index 467efff9e72c4..081dec3f6b7db 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java +++ b/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java @@ -192,7 +192,7 @@ private final class CachedItem extends AbstractRefCounted { CachedItem(Key key) { this.key = key; - incRef(); // start with a refcount of 2 so we're not closed while adding the first listener + mustIncRef(); // start with a refcount of 2 so we're not closed while adding the first listener this.future.addListener(new ActionListener<>() { @Override public void onResponse(Value value) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledIterator.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledIterator.java index d6ac42a9211c9..34236b957dea2 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledIterator.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledIterator.java @@ -88,7 +88,7 @@ private void run() { } } try (var itemRefs = new ItemRefCounted()) { - itemRefs.incRef(); + itemRefs.mustIncRef(); itemConsumer.accept(Releasables.releaseOnce(itemRefs::decRef), item); } catch (Exception e) { logger.error(Strings.format("exception when processing [%s] with [%s]", item, itemConsumer), e); @@ -108,7 +108,7 @@ private class ItemRefCounted extends AbstractRefCounted implements Releasable { private boolean isRecursive = true; ItemRefCounted() { - refs.incRef(); + refs.mustIncRef(); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java index 0a2b631f2b545..5a49896cf1a36 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java @@ -94,7 +94,7 @@ public static Iterator wrapWithObject(String return Iterators.concat(startObject(name), iterator, endObject()); } - private static Iterator map(String name, Map map, Function, ToXContent> toXContent) { + public static Iterator map(String name, Map map, Function, ToXContent> toXContent) { return wrapWithObject(name, Iterators.map(map.entrySet().iterator(), toXContent)); } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index fabc10e336368..0552335ab092d 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import 
org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.Coordinator; @@ -26,6 +25,7 @@ import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; @@ -35,6 +35,8 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.NodeHealthService; @@ -62,7 +64,7 @@ /** * A module for loading classes for node discovery. */ -public class DiscoveryModule { +public class DiscoveryModule extends AbstractModule { private static final Logger logger = LogManager.getLogger(DiscoveryModule.class); public static final String MULTI_NODE_DISCOVERY_TYPE = "multi-node"; @@ -112,7 +114,7 @@ public DiscoveryModule( NodeHealthService nodeHealthService, CircuitBreakerService circuitBreakerService, CompatibilityVersions compatibilityVersions, - Set features + FeatureService featureService ) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); @@ -172,19 +174,7 @@ public DiscoveryModule( throw new IllegalArgumentException("Unknown election strategy " + ELECTION_STRATEGY_SETTING.get(settings)); } - if (LEGACY_MULTI_NODE_DISCOVERY_TYPE.equals(discoveryType)) { - assert Version.CURRENT.major == Version.V_7_0_0.major + 1; - DeprecationLogger.getLogger(DiscoveryModule.class) - .critical( - DeprecationCategory.SETTINGS, - "legacy-discovery-type", - "Support for setting [{}] to [{}] is deprecated and will be removed in a future version. Set this setting to [{}] " - + "instead.", - DISCOVERY_TYPE_SETTING.getKey(), - LEGACY_MULTI_NODE_DISCOVERY_TYPE, - MULTI_NODE_DISCOVERY_TYPE - ); - } + checkLegacyMultiNodeDiscoveryType(discoveryType); this.reconfigurator = getReconfigurator(settings, clusterSettings, clusterCoordinationPlugins); var preVoteCollectorFactory = getPreVoteCollectorFactory(clusterCoordinationPlugins); @@ -215,7 +205,7 @@ public DiscoveryModule( leaderHeartbeatService, preVoteCollectorFactory, compatibilityVersions, - features + featureService ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); @@ -224,6 +214,22 @@ public DiscoveryModule( logger.info("using discovery type [{}] and seed hosts providers {}", discoveryType, seedProviderNames); } + @UpdateForV9 + private static void checkLegacyMultiNodeDiscoveryType(String discoveryType) { + if (LEGACY_MULTI_NODE_DISCOVERY_TYPE.equals(discoveryType)) { + DeprecationLogger.getLogger(DiscoveryModule.class) + .critical( + DeprecationCategory.SETTINGS, + "legacy-discovery-type", + "Support for setting [{}] to [{}] is deprecated and will be removed in a future version. 
Set this setting to [{}] " + + "instead.", + DISCOVERY_TYPE_SETTING.getKey(), + LEGACY_MULTI_NODE_DISCOVERY_TYPE, + MULTI_NODE_DISCOVERY_TYPE + ); + } + } + // visible for testing static Reconfigurator getReconfigurator( Settings settings, @@ -285,6 +291,12 @@ public static boolean isSingleNodeDiscovery(Settings settings) { return SINGLE_NODE_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(settings)); } + @Override + protected void configure() { + bind(Coordinator.class).toInstance(coordinator); + bind(Reconfigurator.class).toInstance(reconfigurator); + } + public Coordinator getCoordinator() { return coordinator; } diff --git a/server/src/main/java/org/elasticsearch/env/Environment.java b/server/src/main/java/org/elasticsearch/env/Environment.java index 44cf74c4339a6..2f738eb1412a5 100644 --- a/server/src/main/java/org/elasticsearch/env/Environment.java +++ b/server/src/main/java/org/elasticsearch/env/Environment.java @@ -326,11 +326,7 @@ public static FileStore getFileStore(final Path path) throws IOException { public static long getUsableSpace(Path path) throws IOException { long freeSpaceInBytes = Environment.getFileStore(path).getUsableSpace(); - - /* See: https://bugs.openjdk.java.net/browse/JDK-8162520 */ - if (freeSpaceInBytes < 0) { - freeSpaceInBytes = Long.MAX_VALUE; - } + assert freeSpaceInBytes >= 0; return freeSpaceInBytes; } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 77415bbaea949..2122e5fcc8b6c 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -10,6 +10,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -154,23 +155,19 @@ public void setNodeVersionId(int nodeVersionId) { this.nodeVersion = Version.fromId(nodeVersionId); } - public void setPreviousNodeVersionId(int previousNodeVersionId) { - this.previousNodeVersion = Version.fromId(previousNodeVersionId); - } - public void setOldestIndexVersion(int oldestIndexVersion) { this.oldestIndexVersion = IndexVersion.fromId(oldestIndexVersion); } + private Version getVersionOrFallbackToEmpty() { + return Objects.requireNonNullElse(this.nodeVersion, Version.V_EMPTY); + } + public NodeMetadata build() { - final Version nodeVersion; + @UpdateForV9 // version is required in the node metadata from v9 onwards + final Version nodeVersion = getVersionOrFallbackToEmpty(); final IndexVersion oldestIndexVersion; - if (this.nodeVersion == null) { - assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; - nodeVersion = Version.V_EMPTY; - } else { - nodeVersion = this.nodeVersion; - } + if (this.previousNodeVersion == null) { previousNodeVersion = nodeVersion; } diff --git a/server/src/main/java/org/elasticsearch/features/FeatureData.java b/server/src/main/java/org/elasticsearch/features/FeatureData.java new file mode 100644 index 0000000000000..273617205ee47 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/features/FeatureData.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.features; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.TreeMap; + +import static org.elasticsearch.features.FeatureService.CLUSTER_FEATURES_ADDED_VERSION; + +/** + * Reads and consolidate features exposed by a list {@link FeatureSpecification}, grouping them into historical features and node + * features for the consumption of {@link FeatureService} + */ +public class FeatureData { + private final NavigableMap> historicalFeatures; + private final Map nodeFeatures; + + private FeatureData(NavigableMap> historicalFeatures, Map nodeFeatures) { + this.historicalFeatures = historicalFeatures; + this.nodeFeatures = nodeFeatures; + } + + public static FeatureData createFromSpecifications(List specs) { + Map allFeatures = new HashMap<>(); + + NavigableMap> historicalFeatures = new TreeMap<>(); + Map nodeFeatures = new HashMap<>(); + for (FeatureSpecification spec : specs) { + for (var hfe : spec.getHistoricalFeatures().entrySet()) { + FeatureSpecification existing = allFeatures.putIfAbsent(hfe.getKey().id(), spec); + // the same SPI class can be loaded multiple times if it's in the base classloader + if (existing != null && existing.getClass() != spec.getClass()) { + throw new IllegalArgumentException( + Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", hfe.getKey().id(), existing, spec) + ); + } + + if (hfe.getValue().after(CLUSTER_FEATURES_ADDED_VERSION)) { + throw new IllegalArgumentException( + Strings.format( + "Historical feature [%s] declared by [%s] for version [%s] is not a historical version", + hfe.getKey().id(), + spec, + hfe.getValue() + ) + ); + } + + historicalFeatures.computeIfAbsent(hfe.getValue(), k -> new HashSet<>()).add(hfe.getKey().id()); + } + + for (NodeFeature f : spec.getFeatures()) { + FeatureSpecification existing = allFeatures.putIfAbsent(f.id(), spec); + if (existing != null && existing.getClass() != spec.getClass()) { + throw new IllegalArgumentException( + Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", f.id(), existing, spec) + ); + } + + nodeFeatures.put(f.id(), f); + } + } + + return new FeatureData(consolidateHistoricalFeatures(historicalFeatures), Map.copyOf(nodeFeatures)); + } + + private static NavigableMap> consolidateHistoricalFeatures( + NavigableMap> declaredHistoricalFeatures + ) { + // update each version by adding in all features from previous versions + Set featureAggregator = new HashSet<>(); + for (Map.Entry> versions : declaredHistoricalFeatures.entrySet()) { + featureAggregator.addAll(versions.getValue()); + versions.setValue(Set.copyOf(featureAggregator)); + } + + return Collections.unmodifiableNavigableMap(declaredHistoricalFeatures); + } + + public NavigableMap> getHistoricalFeatures() { + return historicalFeatures; + } + + public Map getNodeFeatures() { + return nodeFeatures; + } +} diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index 5d7632a91b0b8..1d60627656b9e 100644 --- 
a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -10,19 +10,14 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Set; -import java.util.TreeMap; /** * Manages information on the features supported by nodes in the cluster @@ -39,72 +34,21 @@ public class FeatureService { public static final Version CLUSTER_FEATURES_ADDED_VERSION = Version.V_8_12_0; private final NavigableMap> historicalFeatures; - private final Set nodeFeatures; + private final Map nodeFeatures; public FeatureService(List specs) { - Map allFeatures = new HashMap<>(); - NavigableMap> historicalFeatures = new TreeMap<>(); - Set nodeFeatures = new HashSet<>(); - for (FeatureSpecification spec : specs) { - for (var hfe : spec.getHistoricalFeatures().entrySet()) { - FeatureSpecification existing = allFeatures.putIfAbsent(hfe.getKey().id(), spec); - // the same SPI class can be loaded multiple times if it's in the base classloader - if (existing != null && existing.getClass() != spec.getClass()) { - throw new IllegalArgumentException( - Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", hfe.getKey().id(), existing, spec) - ); - } + var featureData = FeatureData.createFromSpecifications(specs); + nodeFeatures = featureData.getNodeFeatures(); + historicalFeatures = featureData.getHistoricalFeatures(); - if (hfe.getValue().onOrAfter(CLUSTER_FEATURES_ADDED_VERSION)) { - throw new IllegalArgumentException( - Strings.format( - "Historical feature [%s] declared by [%s] for version [%s] is not a historical version", - hfe.getKey().id(), - spec, - hfe.getValue() - ) - ); - } - - historicalFeatures.computeIfAbsent(hfe.getValue(), k -> new HashSet<>()).add(hfe.getKey().id()); - } - - for (NodeFeature f : spec.getFeatures()) { - FeatureSpecification existing = allFeatures.putIfAbsent(f.id(), spec); - if (existing != null && existing.getClass() != spec.getClass()) { - throw new IllegalArgumentException( - Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", f.id(), existing, spec) - ); - } - - nodeFeatures.add(f.id()); - } - } - - this.historicalFeatures = consolidateHistoricalFeatures(historicalFeatures); - this.nodeFeatures = Set.copyOf(nodeFeatures); - - logger.info("Registered local node features {}", nodeFeatures.stream().sorted().toList()); - } - - private static NavigableMap> consolidateHistoricalFeatures( - NavigableMap> declaredHistoricalFeatures - ) { - // update each version by adding in all features from previous versions - Set featureAggregator = new HashSet<>(); - for (Map.Entry> versions : declaredHistoricalFeatures.entrySet()) { - featureAggregator.addAll(versions.getValue()); - versions.setValue(Set.copyOf(featureAggregator)); - } - - return Collections.unmodifiableNavigableMap(declaredHistoricalFeatures); + logger.info("Registered local node features {}", nodeFeatures.keySet().stream().sorted().toList()); } /** * The non-historical features supported by this node. 
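The extraction into FeatureData keeps the consolidation step shown in the new file above: each version's feature set is rewritten to include everything declared for earlier versions, so a single floorEntry lookup can answer membership questions. A self-contained sketch of that pass, using Integer in place of Version:

import java.util.HashSet;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;

public class ConsolidateSketch {
    // After this pass, the set for version V contains every feature introduced at or before V.
    static NavigableMap<Integer, Set<String>> consolidate(NavigableMap<Integer, Set<String>> declared) {
        Set<String> aggregate = new HashSet<>();
        for (Map.Entry<Integer, Set<String>> entry : declared.entrySet()) {
            aggregate.addAll(entry.getValue());
            entry.setValue(Set.copyOf(aggregate));
        }
        return declared;
    }

    public static void main(String[] args) {
        NavigableMap<Integer, Set<String>> declared = new TreeMap<>();
        declared.put(1, new HashSet<>(Set.of("f1")));
        declared.put(2, new HashSet<>(Set.of("f2")));
        System.out.println(consolidate(declared)); // {1=[f1], 2=[f1, f2]} (set ordering may vary)
    }
}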
*/ - public Set getNodeFeatures() { + public Map getNodeFeatures() { return nodeFeatures; } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index a7cf7299a8502..e7b8eadb3f771 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.node.Node; @@ -184,7 +185,7 @@ private PersistedState createOnDiskPersistedState( long currentTerm = onDiskState.currentTerm; if (onDiskState.empty()) { - assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "legacy metadata loader is not needed anymore from v9 onwards"; + @UpdateForV9 // legacy metadata loader is not needed anymore from v9 onwards final Tuple legacyState = metaStateService.loadFullState(); if (legacyState.v1().isEmpty() == false) { metadata = legacyState.v2(); diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 4ba7c91d411f3..1db0ec7346a32 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -54,6 +55,7 @@ public MetaStateService(NodeEnvironment nodeEnv, NamedXContentRegistry namedXCon * meta state with globalGeneration -1 and empty meta data is returned. * @throws IOException if some IOException when loading files occurs or there is no metadata referenced by manifest file. 
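This patch repeatedly swaps assert-based version guards for an @UpdateForV9 marker, applied to classes, methods, and local declarations. The annotation's definition is not part of the diff, so the following is only a plausible minimal shape for such a marker, an assumption rather than the actual org.elasticsearch.core source:

import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Hypothetical shape: no runtime behaviour, just a searchable tag for code that
// must be revisited (removed or made mandatory) when the v9 branch is cut.
@Documented
@Retention(RetentionPolicy.SOURCE) // retention level is a guess
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD, ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE })
public @interface UpdateForV9 {
}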
*/ + @UpdateForV9 public Tuple loadFullState() throws IOException { final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); if (manifest == null) { diff --git a/server/src/main/java/org/elasticsearch/health/GetHealthAction.java b/server/src/main/java/org/elasticsearch/health/GetHealthAction.java index 0e4722a872c4e..c1efa58a50c86 100644 --- a/server/src/main/java/org/elasticsearch/health/GetHealthAction.java +++ b/server/src/main/java/org/elasticsearch/health/GetHealthAction.java @@ -67,6 +67,12 @@ public Response(final ClusterName clusterName, final List } } + public Response(final ClusterName clusterName, final List indicators, HealthStatus topLevelStatus) { + this.indicators = indicators; + this.clusterName = clusterName; + this.status = topLevelStatus; + } + public ClusterName getClusterName() { return clusterName; } diff --git a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java index 89b6c998c8508..3a5d11f862efc 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java +++ b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java @@ -16,7 +16,7 @@ public class HealthFeatures implements FeatureSpecification { - public static final NodeFeature SUPPORTS_HEALTH = new NodeFeature("supports_health"); + public static final NodeFeature SUPPORTS_HEALTH = new NodeFeature("health.supports_health"); @Override public Map getHistoricalFeatures() { diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index 4d50764aa0cc1..55b03ec1192c8 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -36,11 +36,14 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.health.HealthStatus.GREEN; + /** * This class periodically logs the results of the Health API to the standard Elasticsearch server log file. */ public class HealthPeriodicLogger implements ClusterStateListener, Closeable, SchedulerEngine.Listener { public static final String HEALTH_FIELD_PREFIX = "elasticsearch.health"; + public static final String MESSAGE_FIELD = "message"; public static final Setting POLL_INTERVAL_SETTING = Setting.timeSetting( "health.periodic_logger.poll_interval", @@ -90,7 +93,18 @@ public class HealthPeriodicLogger implements ClusterStateListener, Closeable, Sc * @param client the client used to call the Health Service. * @param healthService the Health Service, where the actual Health API logic lives. 
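The change that follows replaces HealthPeriodicLogger's public constructor plus init() with a private constructor behind a static create() that registers cluster-state listeners only once the instance is fully built, so no listener can ever observe a half-initialized object. A generic sketch of that factory pattern, with hypothetical Bus/Listener types:

import java.util.ArrayList;
import java.util.List;

public class FactorySketch {
    interface Listener { void onEvent(String event); }
    interface Bus { void addListener(Listener listener); }

    static final class PeriodicLogger implements Listener {
        private PeriodicLogger() {} // nothing in here publishes `this`

        // Registration happens after construction completes, mirroring create() below.
        static PeriodicLogger create(Bus bus) {
            PeriodicLogger logger = new PeriodicLogger();
            bus.addListener(logger);
            return logger;
        }

        @Override
        public void onEvent(String event) { System.out.println("saw " + event); }
    }

    public static void main(String[] args) {
        List<Listener> listeners = new ArrayList<>();
        PeriodicLogger.create(listeners::add);
        listeners.forEach(l -> l.onEvent("cluster-state-update"));
    }
}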
*/ - public HealthPeriodicLogger(Settings settings, ClusterService clusterService, Client client, HealthService healthService) { + public static HealthPeriodicLogger create( + Settings settings, + ClusterService clusterService, + Client client, + HealthService healthService + ) { + HealthPeriodicLogger logger = new HealthPeriodicLogger(settings, clusterService, client, healthService); + logger.registerListeners(); + return logger; + } + + private HealthPeriodicLogger(Settings settings, ClusterService clusterService, Client client, HealthService healthService) { this.settings = settings; this.clusterService = clusterService; this.client = client; @@ -100,11 +114,8 @@ public HealthPeriodicLogger(Settings settings, ClusterService clusterService, Cl this.enabled = ENABLED_SETTING.get(settings); } - /** - * Initializer method to avoid the publication of a self reference in the constructor. - */ - public void init() { - if (this.enabled) { + private void registerListeners() { + if (enabled) { clusterService.addListener(this); } clusterService.getClusterSettings().addSettingsUpdateConsumer(ENABLED_SETTING, this::enable); @@ -202,6 +213,18 @@ static Map convertToLoggedFields(List ind ); }); + // message field. Show the non-green indicators if they exist. + List nonGreen = indicatorResults.stream() + .filter(p -> p.status() != GREEN) + .map(HealthIndicatorResult::name) + .sorted() + .toList(); + if (nonGreen.isEmpty()) { + result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s", status.xContentValue())); + } else { + result.put(MESSAGE_FIELD, String.format(Locale.ROOT, "health=%s [%s]", status.xContentValue(), String.join(",", nonGreen))); + } + return result; } diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index b5a334e56e94c..4c2b589584bdc 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -131,8 +131,6 @@ public static class Builder { private Disk disk; private ShardLimits shardLimits; - private Builder() {} - private Builder(HealthMetadata healthMetadata) { this.disk = healthMetadata.diskMetadata; this.shardLimits = healthMetadata.shardLimitsMetadata; diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index a6f6eb8750cac..177e4d471cf30 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -91,7 +91,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private volatile BoundTransportAddress boundAddress; private final AtomicLong totalChannelsAccepted = new AtomicLong(); private final Map httpChannels = new ConcurrentHashMap<>(); - private final PlainActionFuture allClientsClosedListener = PlainActionFuture.newFuture(); + private final PlainActionFuture allClientsClosedListener = new PlainActionFuture<>(); private final RefCounted refCounted = AbstractRefCounted.of(() -> allClientsClosedListener.onResponse(null)); private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final long shutdownGracePeriodMillis; diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java 
b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 930b20b927bd8..24df7875f7e3d 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -23,7 +23,6 @@ import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.ChunkedRestResponseBody; import org.elasticsearch.rest.LoggingChunkedRestResponseBody; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; @@ -40,7 +39,7 @@ * The default rest channel for incoming requests. This class implements the basic logic for sending a rest * response. It will set necessary headers and ensure that bytes are released after the response is sent. */ -public class DefaultRestChannel extends AbstractRestChannel implements RestChannel { +public class DefaultRestChannel extends AbstractRestChannel { static final String CLOSE = "close"; static final String CONNECTION = "connection"; diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java index 5e8fb556b2089..9991d42e013e3 100644 --- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java +++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -9,13 +9,11 @@ package org.elasticsearch.index; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; public abstract class AbstractIndexComponent { protected final Logger logger; - protected final DeprecationLogger deprecationLogger; protected final IndexSettings indexSettings; /** @@ -23,7 +21,6 @@ public abstract class AbstractIndexComponent { */ protected AbstractIndexComponent(IndexSettings indexSettings) { this.logger = Loggers.getLogger(getClass(), indexSettings.getIndex()); - this.deprecationLogger = DeprecationLogger.getLogger(getClass()); this.indexSettings = indexSettings; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index b038db5ac379a..2e600bbdc5ed4 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsAccounting; @@ -158,7 +159,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final IndexNameExpressionResolver expressionResolver; private final Supplier<Sort> indexSortSupplier; private final ValuesSourceRegistry valuesSourceRegistry; - private Supplier<DocumentParsingObserver> documentParsingObserverSupplier; + private final Supplier<DocumentParsingObserver> documentParsingObserverSupplier; @SuppressWarnings("this-escape") public IndexService( @@ -231,7 +232,7 @@ public IndexService( this.indexSortSupplier = () -> indexSettings.getIndexSortConfig() .buildIndexSort( mapperService::fieldType, - (fieldType, searchLookup) -> indexFieldData.getForField(fieldType,
FieldDataContext.noRuntimeFields("index sort")) + (fieldType, searchLookup) -> loadFielddata(fieldType, FieldDataContext.noRuntimeFields("index sort")) ); } else { this.indexSortSupplier = () -> null; @@ -662,7 +663,7 @@ public SearchExecutionContext newSearchExecutionContext( shardRequestIndex, indexSettings, indexCache.bitsetFilterCache(), - indexFieldData::getForField, + this::loadFielddata, mapperService(), mapperService().mappingLookup(), similarityService(), @@ -1293,4 +1294,7 @@ public static Map parseRuntimeMappings( return runtimeFieldTypes; } + public IndexFieldData loadFielddata(MappedFieldType fieldType, FieldDataContext fieldDataContext) { + return indexFieldData.getForField(fieldType, fieldDataContext); + } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 6327c2ba53f54..b6bebcf6abb12 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Version; import org.elasticsearch.core.Assertions; +import org.elasticsearch.core.UpdateForV9; import java.lang.reflect.Field; import java.util.Collection; @@ -44,6 +45,7 @@ private static IndexVersion def(int id, Version luceneVersion) { return new IndexVersion(id, luceneVersion); } + @UpdateForV9 // remove the index versions with which v9 will not need to interact public static final IndexVersion ZERO = def(0, Version.LATEST); public static final IndexVersion V_7_0_0 = def(7_00_00_99, Version.LUCENE_8_0_0); @@ -87,6 +89,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion NEW_SPARSE_VECTOR = def(8_500_001, Version.LUCENE_9_7_0); public static final IndexVersion SPARSE_VECTOR_IN_FIELD_NAMES_SUPPORT = def(8_500_002, Version.LUCENE_9_7_0); public static final IndexVersion UPGRADE_LUCENE_9_8 = def(8_500_003, Version.LUCENE_9_8_0); + public static final IndexVersion ES_VERSION_8_12 = def(8_500_004, Version.LUCENE_9_8_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index bd228db91c0e1..e6b2a861458d0 100644 --- a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -111,17 +111,33 @@ public final class MergePolicyConfig { private final Logger logger; private final boolean mergesEnabled; private volatile Type mergePolicyType; + private final ByteSizeValue defaultMaxMergedSegment; + private final ByteSizeValue defaultMaxTimeBasedMergedSegment; public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + public static final Setting DEFAULT_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( + "indices.merge.policy.max_merged_segment", + DEFAULT_MAX_MERGED_SEGMENT, + ByteSizeValue.ofBytes(1L), + ByteSizeValue.ofBytes(Long.MAX_VALUE), + Setting.Property.NodeScope + ); /** * Time-based data generally gets rolled over, so there is not much value in enforcing a maximum segment size, which has the side effect * of merging fewer segments together than the merge factor, which in-turn increases write amplification. So we set an arbitrarily high * roof that serves as a protection that we expect to never hit. */ public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = new ByteSizeValue(100, ByteSizeUnit.GB); + public static final Setting DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( + "indices.merge.policy.max_time_based_merged_segment", + DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT, + ByteSizeValue.ofBytes(1L), + ByteSizeValue.ofBytes(Long.MAX_VALUE), + Setting.Property.NodeScope + ); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; /** * A default value for {@link LogByteSizeMergePolicy}'s merge factor: 32. This default value differs from the Lucene default of 10 in @@ -262,8 +278,8 @@ MergePolicy getMergePolicy(MergePolicyConfig config, boolean isTimeBasedIndex) { double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); - // TODO is this really a good default number for max_merge_segment, what happens for large indices, - // won't they end up with many segments? + this.defaultMaxMergedSegment = DEFAULT_MAX_MERGED_SEGMENT_SETTING.get(indexSettings.getNodeSettings()); + this.defaultMaxTimeBasedMergedSegment = DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING.get(indexSettings.getNodeSettings()); ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING); int mergeFactor = indexSettings.getValue(INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING); @@ -315,8 +331,8 @@ void setMergeFactor(int mergeFactor) { void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { // We use 0 as a placeholder for "unset". 
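The two merge-segment defaults become node-scoped settings, and the existing 0-means-unset placeholder (handled by the branch that follows) now falls back to those configurable values instead of hard-coded constants. A small sketch of that fallback decision, with sizes simplified to megabyte longs:

public class MergeSegmentDefaultSketch {
    static final long NODE_DEFAULT_MB = 5L * 1024;               // indices.merge.policy.max_merged_segment, 5 GB default
    static final long NODE_TIME_BASED_DEFAULT_MB = 100L * 1024;  // ...max_time_based_merged_segment, 100 GB default

    // Mirrors setMaxMergedSegment(...): 0 on the index means "unset", so the
    // node-level default for the index's flavour applies.
    static long effectiveMaxMergedSegmentMB(long indexSettingMB, boolean timeBasedIndex) {
        if (indexSettingMB == 0) {
            return timeBasedIndex ? NODE_TIME_BASED_DEFAULT_MB : NODE_DEFAULT_MB;
        }
        return indexSettingMB;
    }

    public static void main(String[] args) {
        System.out.println(effectiveMaxMergedSegmentMB(0, false));   // 5120 -> node-level default
        System.out.println(effectiveMaxMergedSegmentMB(2048, true)); // 2048 -> explicit index setting wins
    }
}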
if (maxMergedSegment.getBytes() == 0) { - tieredMergePolicy.setMaxMergedSegmentMB(DEFAULT_MAX_MERGED_SEGMENT.getMbFrac()); - timeBasedMergePolicy.setMaxMergeMB(DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT.getMbFrac()); + tieredMergePolicy.setMaxMergedSegmentMB(defaultMaxMergedSegment.getMbFrac()); + timeBasedMergePolicy.setMaxMergeMB(defaultMaxTimeBasedMergedSegment.getMbFrac()); } else { tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); timeBasedMergePolicy.setMaxMergeMB(maxMergedSegment.getMbFrac()); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index 7dd605c4c8a73..e19ee050c93a7 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -40,6 +40,7 @@ import org.apache.lucene.analysis.pt.PortugueseAnalyzer; import org.apache.lucene.analysis.ro.RomanianAnalyzer; import org.apache.lucene.analysis.ru.RussianAnalyzer; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; @@ -129,6 +130,7 @@ public static CharArraySet parseStemExclusion(Settings settings, CharArraySet de entry("_portuguese_", PortugueseAnalyzer.getDefaultStopSet()), entry("_romanian_", RomanianAnalyzer.getDefaultStopSet()), entry("_russian_", RussianAnalyzer.getDefaultStopSet()), + entry("_serbian_", SerbianAnalyzer.getDefaultStopSet()), entry("_sorani_", SoraniAnalyzer.getDefaultStopSet()), entry("_spanish_", SpanishAnalyzer.getDefaultStopSet()), entry("_swedish_", SwedishAnalyzer.getDefaultStopSet()), diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index a496429cc3e2b..43437529cd301 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -48,6 +48,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.DocumentParser; @@ -100,7 +101,8 @@ public abstract class Engine implements Closeable { - public static final String SYNC_COMMIT_ID = "sync_id"; // TODO: Remove sync_id in 9.0 + @UpdateForV9 // TODO: Remove sync_id in 9.0 + public static final String SYNC_COMMIT_ID = "sync_id"; public static final String HISTORY_UUID_KEY = "history_uuid"; public static final String FORCE_MERGE_UUID_KEY = "force_merge_uuid"; public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no"; @@ -1140,7 +1142,7 @@ public void externalRefresh(String source, ActionListener */ // TODO: Remove or rename for increased clarity public void flush(boolean force, boolean waitIfOngoing) throws EngineException { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(force, waitIfOngoing, future); future.actionGet(); } @@ -1167,7 +1169,7 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { * a lucene commit if nothing needs to be committed. 
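The synchronous flush() overloads in this hunk are thin adapters over the callback-based flush: allocate a future, pass it in as the listener, block until it completes. The same shape, sketched with CompletableFuture standing in for PlainActionFuture and join() for actionGet():

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

public class BlockingFlushSketch {
    record FlushResult(long generation) {}

    // Callback-based API, like flush(force, waitIfOngoing, listener) above.
    static void flushAsync(Consumer<FlushResult> listener) {
        listener.accept(new FlushResult(42)); // pretend the engine flushed
    }

    // Blocking wrapper: the future is both the listener and the rendezvous point.
    static FlushResult flushBlocking() {
        CompletableFuture<FlushResult> future = new CompletableFuture<>();
        flushAsync(future::complete);
        return future.join(); // like actionGet(): wait for the async result
    }

    public static void main(String[] args) {
        System.out.println(flushBlocking()); // FlushResult[generation=42]
    }
}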
*/ public final void flush() throws EngineException { - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(false, false, future); future.actionGet(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 141a06eff0ec6..6cdd86ce6c9a7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -759,13 +759,11 @@ private static String loadHistoryUUID(Map commitData) { private ExternalReaderManager createReaderManager(RefreshWarmerListener externalRefreshListener) throws EngineException { boolean success = false; + ElasticsearchDirectoryReader directoryReader = null; ElasticsearchReaderManager internalReaderManager = null; try { try { - final ElasticsearchDirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap( - DirectoryReader.open(indexWriter), - shardId - ); + directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); internalReaderManager = createInternalReaderManager(directoryReader); ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); @@ -782,7 +780,9 @@ private ExternalReaderManager createReaderManager(RefreshWarmerListener external } } finally { if (success == false) { // release everything we created on a failure - IOUtils.closeWhileHandlingException(internalReaderManager, indexWriter); + // make sure that we close the directory reader even if the internal reader manager has failed to initialize + var reader = internalReaderManager == null ? directoryReader : internalReaderManager; + IOUtils.closeWhileHandlingException(reader, indexWriter); } } } @@ -2469,7 +2469,7 @@ public IndexCommitRef acquireLastIndexCommit(final boolean flushFirst) throws En if (flushFirst) { logger.trace("start flush for snapshot"); // TODO: Split acquireLastIndexCommit into two apis one with blocking flushes one without - PlainActionFuture future = PlainActionFuture.newFuture(); + PlainActionFuture future = new PlainActionFuture<>(); flush(false, true, future); future.actionGet(); logger.trace("finish flush for snapshot"); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index addc6f33c9eba..a18ea0f90ec08 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -112,7 +112,7 @@ protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String f final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? 
nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - selectedValues = sortMode.select(values, missingBytes, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + selectedValues = sortMode.select(values, missingBytes, rootDocs, innerDocs, maxChildren); } return selectedValues; } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index 76463807942a2..dbc3aadde2e9f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -65,7 +65,7 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, doubl final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + return sortMode.select(values, missingValue, rootDocs, innerDocs, maxChildren); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java index 4b8351f430e05..5dbcafcbdb5b8 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java @@ -60,7 +60,7 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, float final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + return sortMode.select(values, missingValue, rootDocs, innerDocs, maxChildren); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java index 827e1618adde2..e8d4363ca9932 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java @@ -90,7 +90,7 @@ private NumericDocValues getNumericDocValues(LeafReaderContext context, long mis final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? 
nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + return sortMode.select(values, missingValue, rootDocs, innerDocs, maxChildren); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java index 2e894ea304fdc..340af1c1f7347 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java @@ -10,16 +10,18 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData; import org.elasticsearch.index.fielddata.plain.AbstractLeafOrdinalsFieldData; -import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.script.field.ToScriptFieldFactory; import java.io.IOException; @@ -37,7 +39,7 @@ public enum GlobalOrdinalsBuilder { public static IndexOrdinalsFieldData build( final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, - CircuitBreakerService breakerService, + CircuitBreaker breaker, Logger logger, ToScriptFieldFactory toScriptFieldFactory ) throws IOException { @@ -50,9 +52,26 @@ public static IndexOrdinalsFieldData build( atomicFD[i] = indexFieldData.load(indexReader.leaves().get(i)); subs[i] = atomicFD[i].getOrdinalsValues(); } - final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT); + final TermsEnum[] termsEnums = new TermsEnum[subs.length]; + final long[] weights = new long[subs.length]; + // we assume that TermsEnum are visited sequentially, so we can share the counter between them + final long[] counter = new long[1]; + for (int i = 0; i < subs.length; ++i) { + termsEnums[i] = new FilterLeafReader.FilterTermsEnum(subs[i].termsEnum()) { + @Override + public BytesRef next() throws IOException { + // check parent circuit breaker every 65536 calls + if ((counter[0]++ & 0xFFFF) == 0) { + breaker.addEstimateBytesAndMaybeBreak(0L, "Global Ordinals"); + } + return in.next(); + } + }; + weights[i] = subs[i].getValueCount(); + } + final OrdinalMap ordinalMap = OrdinalMap.build(null, termsEnums, weights, PackedInts.DEFAULT); final long memorySizeInBytes = ordinalMap.ramBytesUsed(); - breakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(memorySizeInBytes); + breaker.addWithoutBreaking(memorySizeInBytes); TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS); if (logger.isDebugEnabled()) { @@ -108,5 +127,4 @@ public void close() {} took ); } - } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java index 610f4a19f1a52..b4b15a481411d 100644 --- 
a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java @@ -17,6 +17,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData; @@ -28,6 +29,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; +import java.util.concurrent.ExecutionException; public abstract class AbstractIndexOrdinalsFieldData implements IndexOrdinalsFieldData { private static final Logger logger = LogManager.getLogger(AbstractIndexOrdinalsFieldData.class); @@ -82,6 +84,8 @@ public LeafOrdinalsFieldData load(LeafReaderContext context) { } catch (Exception e) { if (e instanceof ElasticsearchException) { throw (ElasticsearchException) e; + } else if (e instanceof ExecutionException && e.getCause() instanceof ElasticsearchException) { + throw (ElasticsearchException) e.getCause(); } else { throw new ElasticsearchException(e); } @@ -128,6 +132,8 @@ private IndexOrdinalsFieldData loadGlobalInternal(DirectoryReader indexReader) { } catch (Exception e) { if (e instanceof ElasticsearchException) { throw (ElasticsearchException) e; + } else if (e instanceof ExecutionException && e.getCause() instanceof ElasticsearchException) { + throw (ElasticsearchException) e.getCause(); } else { throw new ElasticsearchException(e); } @@ -136,7 +142,13 @@ private IndexOrdinalsFieldData loadGlobalInternal(DirectoryReader indexReader) { @Override public IndexOrdinalsFieldData loadGlobalDirect(DirectoryReader indexReader) throws Exception { - return GlobalOrdinalsBuilder.build(indexReader, this, breakerService, logger, toScriptFieldFactory); + return GlobalOrdinalsBuilder.build( + indexReader, + this, + breakerService.getBreaker(CircuitBreaker.FIELDDATA), + logger, + toScriptFieldFactory + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 7d542d1e35275..52fc13abf200f 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -52,17 +52,16 @@ public class GetResult implements Writeable, Iterable, ToXContent private static final String FOUND = "found"; private static final String FIELDS = "fields"; - private String index; - private String id; - private long version; - private long seqNo; - private long primaryTerm; - private boolean exists; + private final String index; + private final String id; + private final long version; + private final long seqNo; + private final long primaryTerm; + private final boolean exists; private final Map documentFields; private final Map metaFields; private Map sourceAsMap; private BytesReference source; - private byte[] sourceAsBytes; public GetResult(StreamInput in) throws IOException { index = in.readString(); @@ -155,20 +154,6 @@ public long getPrimaryTerm() { return primaryTerm; } - /** - * The source of the document if exists. 
- */ - public byte[] source() { - if (source == null) { - return null; - } - if (sourceAsBytes != null) { - return sourceAsBytes; - } - this.sourceAsBytes = BytesReference.toBytes(sourceRef()); - return this.sourceAsBytes; - } - /** * Returns bytes reference, also un compress the source if needed. */ @@ -229,10 +214,6 @@ public Map sourceAsMap() throws ElasticsearchParseException { return sourceAsMap; } - public Map getSource() { - return sourceAsMap(); - } - public Map getMetadataFields() { return metaFields; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java index 90a295e5a25f2..11e57e030dfe7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -15,171 +16,101 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.UnicodeUtil; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.elasticsearch.index.mapper.BlockLoader.BlockFactory; import org.elasticsearch.index.mapper.BlockLoader.BooleanBuilder; import org.elasticsearch.index.mapper.BlockLoader.Builder; -import org.elasticsearch.index.mapper.BlockLoader.BuilderFactory; import org.elasticsearch.index.mapper.BlockLoader.BytesRefBuilder; import org.elasticsearch.index.mapper.BlockLoader.Docs; import org.elasticsearch.index.mapper.BlockLoader.DoubleBuilder; import org.elasticsearch.index.mapper.BlockLoader.IntBuilder; import org.elasticsearch.index.mapper.BlockLoader.LongBuilder; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import java.io.IOException; /** * A reader that supports reading doc-values from a Lucene segment in Block fashion. */ -public abstract class BlockDocValuesReader { - public interface Factory { - BlockDocValuesReader build(int segment) throws IOException; - - boolean supportsOrdinals(); - - SortedSetDocValues ordinals(int segment) throws IOException; - } - - protected final Thread creationThread; +public abstract class BlockDocValuesReader implements BlockLoader.AllReader { + private final Thread creationThread; public BlockDocValuesReader() { this.creationThread = Thread.currentThread(); } - /** - * Returns the current doc that this reader is on. - */ - public abstract int docID(); + protected abstract int docId(); /** - * The {@link BlockLoader.Builder} for data of this type. + * Checks if the reader can be used to read a range documents starting with the given docID by the current thread. 
*/ - public abstract Builder builder(BuilderFactory factory, int expectedCount); + @Override + public final boolean canReuse(int startingDocID) { + return creationThread == Thread.currentThread() && docId() <= startingDocID; + } - /** - * Reads the values of the given documents specified in the input block - */ - public abstract BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException; + @Override + public abstract String toString(); - /** - * Reads the values of the given document into the builder - */ - public abstract void readValuesFromSingleDoc(int docId, Builder builder) throws IOException; + public abstract static class DocValuesBlockLoader implements BlockLoader { + public abstract AllReader reader(LeafReaderContext context) throws IOException; - /** - * Checks if the reader can be used to read a range documents starting with the given docID by the current thread. - */ - public static boolean canReuse(BlockDocValuesReader reader, int startingDocID) { - return reader != null && reader.creationThread == Thread.currentThread() && reader.docID() <= startingDocID; - } + @Override + public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException { + return reader(context); + } - public static BlockLoader booleans(String fieldName) { - return context -> { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); - if (singleton != null) { - return new SingletonBooleans(singleton); - } - return new Booleans(docValues); - }; - } + @Override + public final RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return reader(context); + } - public static BlockLoader bytesRefsFromOrds(String fieldName) { - return new BlockLoader() { - @Override - public BlockDocValuesReader reader(LeafReaderContext context) throws IOException { - SortedSetDocValues docValues = ordinals(context); - SortedDocValues singleton = DocValues.unwrapSingleton(docValues); - if (singleton != null) { - return new SingletonOrdinals(singleton); - } - return new Ordinals(docValues); - } + @Override + public final StoredFieldsSpec rowStrideStoredFieldSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } - @Override - public boolean supportsOrdinals() { - return true; - } + @Override + public boolean supportsOrdinals() { + return false; + } - @Override - public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { - return DocValues.getSortedSet(context.reader(), fieldName); - } - }; + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { + throw new UnsupportedOperationException(); + } } - /** - * Load {@link BytesRef} values from doc values. Prefer {@link #bytesRefsFromOrds} if - * doc values are indexed with ordinals because that's generally much faster. It's - * possible to use this with field data, but generally should be avoided because field - * data has higher per invocation overhead. - */ - public static BlockLoader bytesRefsFromDocValues(CheckedFunction fieldData) { - return context -> new Bytes(fieldData.apply(context)); - } + public static class LongsBlockLoader extends DocValuesBlockLoader { + private final String fieldName; - /** - * Convert from the stored {@link long} into the {@link double} to load. - * Sadly, this will go megamorphic pretty quickly and slow us down, - * but it gets the job done for now. 
- */ - public interface ToDouble { - double convert(long v); - } + public LongsBlockLoader(String fieldName) { + this.fieldName = fieldName; + } - /** - * Load {@code double} values from doc values. - */ - public static BlockLoader doubles(String fieldName, ToDouble toDouble) { - return context -> { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); - if (singleton != null) { - return new SingletonDoubles(singleton, toDouble); - } - return new Doubles(docValues, toDouble); - }; - } + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.longs(expectedCount); + } - /** - * Load {@code int} values from doc values. - */ - public static BlockLoader ints(String fieldName) { - return context -> { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); - if (singleton != null) { - return new SingletonInts(singleton); + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonLongs(singleton); + } + return new Longs(docValues); } - return new Ints(docValues); - }; - } - - /** - * Load a block of {@code long}s from doc values. - */ - public static BlockLoader longs(String fieldName) { - return context -> { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); if (singleton != null) { return new SingletonLongs(singleton); } - return new Longs(docValues); - }; - } - - /** - * Load blocks with only null. 
- */ - public static BlockLoader nulls() { - return context -> new Nulls(); + return new ConstantNullsReader(); + } } - @Override - public abstract String toString(); - private static class SingletonLongs extends BlockDocValuesReader { private final NumericDocValues numericDocValues; @@ -188,13 +119,8 @@ private static class SingletonLongs extends BlockDocValuesReader { } @Override - public BlockLoader.LongBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.longsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.LongBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.LongBuilder builder = factory.longsFromDocValues(docs.count())) { int lastDoc = -1; for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -213,7 +139,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { BlockLoader.LongBuilder blockBuilder = (BlockLoader.LongBuilder) builder; if (numericDocValues.advanceExact(docId)) { blockBuilder.appendLong(numericDocValues.longValue()); @@ -223,13 +149,13 @@ public void readValuesFromSingleDoc(int docId, Builder builder) throws IOExcepti } @Override - public int docID() { + public int docId() { return numericDocValues.docID(); } @Override public String toString() { - return "SingletonLongs"; + return "BlockDocValuesReader.SingletonLongs"; } } @@ -242,13 +168,8 @@ private static class Longs extends BlockDocValuesReader { } @Override - public BlockLoader.LongBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.longsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.LongBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.LongBuilder builder = factory.longsFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < this.docID) { @@ -261,7 +182,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { read(docId, (LongBuilder) builder); } @@ -284,14 +205,44 @@ private void read(int doc, LongBuilder builder) throws IOException { } @Override - public int docID() { + public int docId() { // There is a .docID on the numericDocValues but it is often not implemented. 
return docID; } @Override public String toString() { - return "Longs"; + return "BlockDocValuesReader.Longs"; + } + } + + public static class IntsBlockLoader extends DocValuesBlockLoader { + private final String fieldName; + + public IntsBlockLoader(String fieldName) { + this.fieldName = fieldName; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.ints(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonInts(singleton); + } + return new Ints(docValues); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); + if (singleton != null) { + return new SingletonInts(singleton); + } + return new ConstantNullsReader(); } } @@ -303,13 +254,8 @@ private static class SingletonInts extends BlockDocValuesReader { } @Override - public IntBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.intsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.IntBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.IntBuilder builder = factory.intsFromDocValues(docs.count())) { int lastDoc = -1; for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -328,7 +274,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { IntBuilder blockBuilder = (IntBuilder) builder; if (numericDocValues.advanceExact(docId)) { blockBuilder.appendInt(Math.toIntExact(numericDocValues.longValue())); @@ -338,13 +284,13 @@ public void readValuesFromSingleDoc(int docId, Builder builder) throws IOExcepti } @Override - public int docID() { + public int docId() { return numericDocValues.docID(); } @Override public String toString() { - return "SingletonInts"; + return "BlockDocValuesReader.SingletonInts"; } } @@ -357,13 +303,8 @@ private static class Ints extends BlockDocValuesReader { } @Override - public IntBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.intsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.IntBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.IntBuilder builder = factory.intsFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < this.docID) { @@ -376,7 +317,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { read(docId, (IntBuilder) builder); } @@ -399,14 +340,55 @@ private void read(int doc, IntBuilder builder) throws IOException { } @Override - public int 
docID() { - // There is a .docID on on the numericDocValues but it is often not implemented. + public int docId() { + // There is a .docID on the numericDocValues but it is often not implemented. return docID; } @Override public String toString() { - return "Ints"; + return "BlockDocValuesReader.Ints"; + } + } + + /** + * Convert from the stored {@link long} into the {@link double} to load. + * Sadly, this will go megamorphic pretty quickly and slow us down, + * but it gets the job done for now. + */ + public interface ToDouble { + double convert(long v); + } + + public static class DoublesBlockLoader extends DocValuesBlockLoader { + private final String fieldName; + private final ToDouble toDouble; + + public DoublesBlockLoader(String fieldName, ToDouble toDouble) { + this.fieldName = fieldName; + this.toDouble = toDouble; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.doubles(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonDoubles(singleton, toDouble); + } + return new Doubles(docValues, toDouble); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); + if (singleton != null) { + return new SingletonDoubles(singleton, toDouble); + } + return new ConstantNullsReader(); } } @@ -421,13 +403,8 @@ private static class SingletonDoubles extends BlockDocValuesReader { } @Override - public DoubleBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.doublesFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.DoubleBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.DoubleBuilder builder = factory.doublesFromDocValues(docs.count())) { int lastDoc = -1; for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -447,7 +424,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { this.docID = docId; DoubleBuilder blockBuilder = (DoubleBuilder) builder; if (docValues.advanceExact(this.docID)) { @@ -458,13 +435,13 @@ public void readValuesFromSingleDoc(int docId, Builder builder) throws IOExcepti } @Override - public int docID() { + public int docId() { return docID; } @Override public String toString() { - return "SingletonDoubles"; + return "BlockDocValuesReader.SingletonDoubles"; } } @@ -479,13 +456,8 @@ private static class Doubles extends BlockDocValuesReader { } @Override - public DoubleBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.doublesFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.DoubleBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.DoubleBuilder builder = 
factory.doublesFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < this.docID) { @@ -498,7 +470,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { read(docId, (DoubleBuilder) builder); } @@ -521,13 +493,58 @@ private void read(int doc, DoubleBuilder builder) throws IOException { } @Override - public int docID() { + public int docId() { return docID; } @Override public String toString() { - return "Doubles"; + return "BlockDocValuesReader.Doubles"; + } + } + + public static class BytesRefsFromOrdsBlockLoader extends DocValuesBlockLoader { + private final String fieldName; + + public BytesRefsFromOrdsBlockLoader(String fieldName) { + this.fieldName = fieldName; + } + + @Override + public BytesRefBuilder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedSetDocValues docValues = context.reader().getSortedSetDocValues(fieldName); + if (docValues != null) { + SortedDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonOrdinals(singleton); + } + return new Ordinals(docValues); + } + SortedDocValues singleton = context.reader().getSortedDocValues(fieldName); + if (singleton != null) { + return new SingletonOrdinals(singleton); + } + return new ConstantNullsReader(); + } + + @Override + public boolean supportsOrdinals() { + return true; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { + return DocValues.getSortedSet(context.reader(), fieldName); + } + + @Override + public String toString() { + return "BytesRefsFromOrds[" + fieldName + "]"; } } @@ -539,12 +556,7 @@ private static class SingletonOrdinals extends BlockDocValuesReader { } @Override - public BytesRefBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.bytesRefsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { try (BlockLoader.SingletonOrdinalsBuilder builder = factory.singletonOrdinalsBuilder(ordinals, docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -562,8 +574,8 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int doc, Builder builder) throws IOException { - if (ordinals.advanceExact(doc)) { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { + if (ordinals.advanceExact(docId)) { ((BytesRefBuilder) builder).appendBytesRef(ordinals.lookupOrd(ordinals.ordValue())); } else { builder.appendNull(); @@ -571,13 +583,13 @@ public void readValuesFromSingleDoc(int doc, Builder builder) throws IOException } @Override - public int docID() { + public int docId() { return ordinals.docID(); } @Override public String toString() { - return "SingletonOrdinals"; + return "BlockDocValuesReader.SingletonOrdinals"; } } @@ -589,13 +601,8 @@ private static class Ordinals extends BlockDocValuesReader { } @Override - public BytesRefBuilder 
builder(BuilderFactory factory, int expectedCount) { - return factory.bytesRefsFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BytesRefBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BytesRefBuilder builder = factory.bytesRefsFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < ordinals.docID()) { @@ -608,12 +615,12 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int doc, Builder builder) throws IOException { - read(doc, (BytesRefBuilder) builder); + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { + read(docId, (BytesRefBuilder) builder); } - private void read(int doc, BytesRefBuilder builder) throws IOException { - if (false == ordinals.advanceExact(doc)) { + private void read(int docId, BytesRefBuilder builder) throws IOException { + if (false == ordinals.advanceExact(docId)) { builder.appendNull(); return; } @@ -630,32 +637,52 @@ private void read(int doc, BytesRefBuilder builder) throws IOException { } @Override - public int docID() { + public int docId() { return ordinals.docID(); } @Override public String toString() { - return "Ordinals"; + return "BlockDocValuesReader.Ordinals"; } } - private static class Bytes extends BlockDocValuesReader { - private final SortedBinaryDocValues docValues; - private int docID = -1; + public static class BytesRefsFromBinaryBlockLoader extends DocValuesBlockLoader { + private final String fieldName; - Bytes(SortedBinaryDocValues docValues) { - this.docValues = docValues; + public BytesRefsFromBinaryBlockLoader(String fieldName) { + this.fieldName = fieldName; } @Override - public BytesRefBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.bytesRefsFromDocValues(expectedCount); + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + BinaryDocValues docValues = context.reader().getBinaryDocValues(fieldName); + if (docValues == null) { + return new ConstantNullsReader(); + } + return new BytesRefsFromBinary(docValues); + } + } + + private static class BytesRefsFromBinary extends BlockDocValuesReader { + private final BinaryDocValues docValues; + private final ByteArrayStreamInput in = new ByteArrayStreamInput(); + private final BytesRef scratch = new BytesRef(); + + private int docID = -1; + + BytesRefsFromBinary(BinaryDocValues docValues) { + this.docValues = docValues; } @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.BytesRefBuilder builder = factory.bytesRefs(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < docID) { @@ -668,7 +695,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { 
read(docId, (BytesRefBuilder) builder); } @@ -678,27 +705,66 @@ private void read(int doc, BytesRefBuilder builder) throws IOException { builder.appendNull(); return; } - int count = docValues.docValueCount(); + BytesRef bytes = docValues.binaryValue(); + assert bytes.length > 0; + in.reset(bytes.bytes, bytes.offset, bytes.length); + int count = in.readVInt(); + scratch.bytes = bytes.bytes; + if (count == 1) { - // TODO read ords in ascending order. Buffers and stuff. - builder.appendBytesRef(docValues.nextValue()); + scratch.length = in.readVInt(); + scratch.offset = in.getPosition(); + builder.appendBytesRef(scratch); return; } builder.beginPositionEntry(); for (int v = 0; v < count; v++) { - builder.appendBytesRef(docValues.nextValue()); + scratch.length = in.readVInt(); + scratch.offset = in.getPosition(); + in.setPosition(scratch.offset + scratch.length); + builder.appendBytesRef(scratch); } builder.endPositionEntry(); } @Override - public int docID() { + public int docId() { return docID; } @Override public String toString() { - return "Bytes"; + return "BlockDocValuesReader.Bytes"; + } + } + + public static class BooleansBlockLoader extends DocValuesBlockLoader { + private final String fieldName; + + public BooleansBlockLoader(String fieldName) { + this.fieldName = fieldName; + } + + @Override + public BooleanBuilder builder(BlockFactory factory, int expectedCount) { + return factory.booleans(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonBooleans(singleton); + } + return new Booleans(docValues); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); + if (singleton != null) { + return new SingletonBooleans(singleton); + } + return new ConstantNullsReader(); } } @@ -710,13 +776,8 @@ private static class SingletonBooleans extends BlockDocValuesReader { } @Override - public BooleanBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.booleansFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.BooleanBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.BooleanBuilder builder = factory.booleansFromDocValues(docs.count())) { int lastDoc = -1; for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); @@ -735,7 +796,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { BooleanBuilder blockBuilder = (BooleanBuilder) builder; if (numericDocValues.advanceExact(docId)) { blockBuilder.appendBoolean(numericDocValues.longValue() != 0); @@ -745,13 +806,13 @@ public void readValuesFromSingleDoc(int docId, Builder builder) throws IOExcepti } @Override - public int docID() { + public int docId() { return numericDocValues.docID(); } @Override public String toString() { - return "SingletonBooleans"; + return "BlockDocValuesReader.SingletonBooleans"; } } @@ -764,13 +825,8 @@ private static class Booleans 
extends BlockDocValuesReader { } @Override - public BooleanBuilder builder(BuilderFactory factory, int expectedCount) { - return factory.booleansFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.BooleanBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + try (BlockLoader.BooleanBuilder builder = factory.booleansFromDocValues(docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); if (doc < this.docID) { @@ -783,7 +839,7 @@ public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IO } @Override - public void readValuesFromSingleDoc(int docId, Builder builder) throws IOException { + public void read(int docId, BlockLoader.StoredFields storedFields, Builder builder) throws IOException { read(docId, (BooleanBuilder) builder); } @@ -806,61 +862,14 @@ private void read(int doc, BooleanBuilder builder) throws IOException { } @Override - public int docID() { + public int docId() { // There is a .docID on the numericDocValues but it is often not implemented. return docID; } @Override public String toString() { - return "Booleans"; - } - } - - private static class Nulls extends BlockDocValuesReader { - private int docID = -1; - - @Override - public BlockLoader.Builder builder(BuilderFactory factory, int expectedCount) { - return factory.nulls(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BuilderFactory factory, Docs docs) throws IOException { - try (BlockLoader.Builder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - builder.appendNull(); - } - return builder.build(); - } - } - - @Override - public void readValuesFromSingleDoc(int docId, Builder builder) { - this.docID = docId; - builder.appendNull(); - } - - @Override - public int docID() { - return docID; - } - - @Override - public String toString() { - return "Nulls"; - } - } - - /** - * Convert a {@link String} into a utf-8 {@link BytesRef}. - */ - protected static BytesRef toBytesRef(BytesRef scratch, String v) { - int len = UnicodeUtil.maxUTF8Length(v.length()); - if (scratch.bytes.length < len) { - scratch.bytes = new byte[len]; + return "BlockDocValuesReader.Booleans"; } - scratch.length = UnicodeUtil.UTF16toUTF8(v, 0, v.length(), scratch.bytes); - return scratch; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java index af53ab42d35d9..6e0329a61c51e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java @@ -13,8 +13,12 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.Releasable; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.search.lookup.Source; import java.io.IOException; +import java.util.List; +import java.util.Map; /** * Interface for loading data in a block shape. Instances of this class @@ -22,26 +26,292 @@ */ public interface BlockLoader { /** - * Build a {@link LeafReaderContext leaf} level reader. + * The {@link BlockLoader.Builder} for data of this type. Called when + * loading from a multi-segment or unsorted block. 
*/ - BlockDocValuesReader reader(LeafReaderContext context) throws IOException; + Builder builder(BlockFactory factory, int expectedCount); + + interface Reader { + /** + * Checks if the reader can be used to read a range documents starting with the given docID by the current thread. + */ + boolean canReuse(int startingDocID); + } + + interface ColumnAtATimeReader extends Reader { + /** + * Reads the values of all documents in {@code docs}. + */ + BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException; + } + + interface RowStrideReader extends Reader { + /** + * Reads the values of the given document into the builder. + */ + void read(int docId, StoredFields storedFields, Builder builder) throws IOException; + } + + interface AllReader extends ColumnAtATimeReader, RowStrideReader {} + + interface StoredFields { + Source source(); + + /** + * @return the ID for the current document + */ + String id(); + + /** + * @return the routing path for the current document + */ + String routing(); + + /** + * @return stored fields for the current document + */ + Map> storedFields(); + } + + ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException; + + RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException; + + StoredFieldsSpec rowStrideStoredFieldSpec(); /** * Does this loader support loading bytes via calling {@link #ordinals}. */ - default boolean supportsOrdinals() { - return false; - } + boolean supportsOrdinals(); /** * Load ordinals for the provided context. */ - default SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { - throw new IllegalStateException("ordinals not supported"); + SortedSetDocValues ordinals(LeafReaderContext context) throws IOException; + + /** + * Load blocks with only null. + */ + BlockLoader CONSTANT_NULLS = new BlockLoader() { + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.nulls(expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return new ConstantNullsReader(); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new ConstantNullsReader(); + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + + @Override + public boolean supportsOrdinals() { + return false; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public String toString() { + return "ConstantNull"; + } + }; + + /** + * Implementation of {@link ColumnAtATimeReader} and {@link RowStrideReader} that always + * loads {@code null}. + */ + class ConstantNullsReader implements AllReader { + @Override + public Block read(BlockFactory factory, Docs docs) throws IOException { + return factory.constantNulls(); + } + + @Override + public void read(int docId, StoredFields storedFields, Builder builder) throws IOException { + builder.appendNull(); + } + + @Override + public boolean canReuse(int startingDocID) { + return true; + } + + @Override + public String toString() { + return "constant_nulls"; + } } /** - * A list of documents to load. + * Load blocks with only {@code value}. 
+ */ + static BlockLoader constantBytes(BytesRef value) { + return new BlockLoader() { + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return new ColumnAtATimeReader() { + @Override + public Block read(BlockFactory factory, Docs docs) { + return factory.constantBytes(value); + } + + @Override + public boolean canReuse(int startingDocID) { + return true; + } + + @Override + public String toString() { + return "constant[" + value + "]"; + } + }; + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new RowStrideReader() { + @Override + public void read(int docId, StoredFields storedFields, Builder builder) { + ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(value); + } + + @Override + public boolean canReuse(int startingDocID) { + return true; + } + + @Override + public String toString() { + return "constant[" + value + "]"; + } + }; + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + + @Override + public boolean supportsOrdinals() { + return false; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public String toString() { + return "ConstantBytes[" + value + "]"; + } + }; + } + + abstract class Delegating implements BlockLoader { + protected final BlockLoader delegate; + + protected Delegating(BlockLoader delegate) { + this.delegate = delegate; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return delegate.builder(factory, expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException { + ColumnAtATimeReader reader = delegate.columnAtATimeReader(context); + if (reader == null) { + return null; + } + return new ColumnAtATimeReader() { + @Override + public Block read(BlockFactory factory, Docs docs) throws IOException { + return reader.read(factory, docs); + } + + @Override + public boolean canReuse(int startingDocID) { + return reader.canReuse(startingDocID); + } + + @Override + public String toString() { + return "Delegating[to=" + delegatingTo() + ", impl=" + reader + "]"; + } + }; + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + RowStrideReader reader = delegate.rowStrideReader(context); + if (reader == null) { + return null; + } + return new RowStrideReader() { + @Override + public void read(int docId, StoredFields storedFields, Builder builder) throws IOException { + reader.read(docId, storedFields, builder); + } + + @Override + public boolean canReuse(int startingDocID) { + return reader.canReuse(startingDocID); + } + + @Override + public String toString() { + return "Delegating[to=" + delegatingTo() + ", impl=" + reader + "]"; + } + }; + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return delegate.rowStrideStoredFieldSpec(); + } + + @Override + public boolean supportsOrdinals() { + return delegate.supportsOrdinals(); + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { + return delegate.ordinals(context); + } + + protected abstract String delegatingTo(); + + @Override + public final String toString() { + return "Delegating[to=" + delegatingTo() + ", impl=" + 
delegate + "]"; + } + } + + /** + * A list of documents to load. Documents are always in non-decreasing order. */ interface Docs { int count(); @@ -55,7 +325,7 @@ interface Docs { * production code. That implementation sits in the "compute" project. The is * also a test implementation, but there may be no more other implementations. */ - interface BuilderFactory { + interface BlockFactory { /** * Build a builder to load booleans as loaded from doc values. Doc values * load booleans deduplicated and in sorted order. @@ -112,11 +382,21 @@ interface BuilderFactory { LongBuilder longs(int expectedCount); /** - * Build a builder that can only load null values. - * TODO this should return a block directly instead of a builder + * Build a builder to load only {@code null}s. */ Builder nulls(int expectedCount); + /** + * Build a block that contains only {@code null}. + */ + Block constantNulls(); + + /** + * Build a block that contains {@code value} repeated + * {@code size} times. + */ + Block constantBytes(BytesRef value); + /** * Build a reader for reading keyword ordinals. */ @@ -129,7 +409,7 @@ interface BuilderFactory { * Marker interface for block results. The compute engine has a fleshed * out implementation. */ - interface Block {} + interface Block extends Releasable {} /** * A builder for typed values. For each document you may either call diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java new file mode 100644 index 0000000000000..0090935f51bc3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; +import org.elasticsearch.search.lookup.Source; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class BlockLoaderStoredFieldsFromLeafLoader implements BlockLoader.StoredFields { + private final LeafStoredFieldLoader loader; + private final SourceLoader.Leaf sourceLoader; + private Source source; + + public BlockLoaderStoredFieldsFromLeafLoader(LeafStoredFieldLoader loader, SourceLoader.Leaf sourceLoader) { + this.loader = loader; + this.sourceLoader = sourceLoader; + } + + public void advanceTo(int doc) throws IOException { + loader.advanceTo(doc); + if (sourceLoader != null) { + source = sourceLoader.source(loader, doc); + } + } + + @Override + public Source source() { + return source; + } + + @Override + public String id() { + return loader.id(); + } + + @Override + public String routing() { + return loader.routing(); + } + + @Override + public Map> storedFields() { + return loader.storedFields(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java index 1261a3612d3cb..12b5ff0e82a03 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java @@ -8,174 +8,34 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; -import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; -import org.elasticsearch.search.lookup.Source; +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Set; /** * Loads values from {@code _source}. This whole process is very slow and cast-tastic, * so it doesn't really try to avoid megamorphic invocations. It's just going to be * slow. - * - * Note that this extends {@link BlockDocValuesReader} because it pretends to load - * doc values because, for now, ESQL only knows how to load things in a doc values - * order. */ -public abstract class BlockSourceReader extends BlockDocValuesReader { - /** - * Read {@code boolean}s from {@code _source}. - */ - public static BlockLoader booleans(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.booleans(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.BooleanBuilder) builder).appendBoolean((Boolean) v); - } - - @Override - public String toString() { - return "SourceBooleans"; - } - }; - } - - /** - * Read {@link BytesRef}s from {@code _source}. 
- */ - public static BlockLoader bytesRefs(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - BytesRef scratch = new BytesRef(); - - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(toBytesRef(scratch, (String) v)); - } - - @Override - public String toString() { - return "SourceBytes"; - } - }; - } - - /** - * Read {@code double}s from {@code _source}. - */ - public static BlockLoader doubles(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.doubles(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.DoubleBuilder) builder).appendDouble(((Number) v).doubleValue()); - } - - @Override - public String toString() { - return "SourceDoubles"; - } - }; - } - - /** - * Read {@code int}s from {@code _source}. - */ - public static BlockLoader ints(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.ints(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.IntBuilder) builder).appendInt(((Number) v).intValue()); - } - - @Override - public String toString() { - return "SourceInts"; - } - }; - } - - /** - * Read {@code long}s from {@code _source}. 
- */ - public static BlockLoader longs(ValueFetcher fetcher) { - StoredFieldLoader loader = StoredFieldLoader.create(true, Set.of()); - return context -> new BlockSourceReader(fetcher, loader.getLoader(context, null)) { - @Override - public BlockLoader.Builder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.longs(expectedCount); - } - - @Override - protected void append(BlockLoader.Builder builder, Object v) { - ((BlockLoader.LongBuilder) builder).appendLong(((Number) v).longValue()); - } - - @Override - public String toString() { - return "SourceLongs"; - } - }; - } - +public abstract class BlockSourceReader implements BlockLoader.RowStrideReader { private final ValueFetcher fetcher; - private final LeafStoredFieldLoader loader; private final List<Object> ignoredValues = new ArrayList<>(); - private int docID = -1; - BlockSourceReader(ValueFetcher fetcher, LeafStoredFieldLoader loader) { + BlockSourceReader(ValueFetcher fetcher) { this.fetcher = fetcher; - this.loader = loader; - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) throws IOException { - try (BlockLoader.Builder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - int doc = docs.get(i); - if (doc < this.docID) { - throw new IllegalStateException("docs within same block must be in order"); - } - readValuesFromSingleDoc(doc, builder); - } - return builder.build(); - } } @Override - public void readValuesFromSingleDoc(int doc, BlockLoader.Builder builder) throws IOException { - this.docID = doc; - loader.advanceTo(doc); - List<Object> values = fetcher.fetchValues(Source.fromBytes(loader.source()), doc, ignoredValues); + public final void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { + List<Object> values = fetcher.fetchValues(storedFields.source(), docId, ignoredValues); ignoredValues.clear(); // TODO do something with these?
- if (values == null) { + if (values == null || values.isEmpty()) { builder.appendNull(); return; } @@ -193,7 +53,213 @@ public void readValuesFromSingleDoc(int doc, BlockLoader.Builder builder) throws protected abstract void append(BlockLoader.Builder builder, Object v); @Override - public int docID() { - return docID; + public boolean canReuse(int startingDocID) { + return true; + } + + private abstract static class SourceBlockLoader implements BlockLoader { + @Override + public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException { + return null; + } + + @Override + public final StoredFieldsSpec rowStrideStoredFieldSpec() { + return StoredFieldsSpec.NEEDS_SOURCE; + } + + @Override + public final boolean supportsOrdinals() { + return false; + } + + @Override + public final SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + } + + public static class BooleansBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public BooleansBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.booleans(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new Booleans(fetcher); + } + } + + private static class Booleans extends BlockSourceReader { + Booleans(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.BooleanBuilder) builder).appendBoolean((Boolean) v); + } + + @Override + public String toString() { + return "BlockSourceReader.Booleans"; + } + } + + public static class BytesRefsBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public BytesRefsBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new BytesRefs(fetcher); + } + } + + private static class BytesRefs extends BlockSourceReader { + BytesRef scratch = new BytesRef(); + + BytesRefs(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(toBytesRef(scratch, (String) v)); + } + + @Override + public String toString() { + return "BlockSourceReader.Bytes"; + } + } + + public static class DoublesBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public DoublesBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.doubles(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new Doubles(fetcher); + } + } + + private static class Doubles extends BlockSourceReader { + Doubles(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.DoubleBuilder) builder).appendDouble(((Number) v).doubleValue()); + } + + @Override + public String toString() { + return "BlockSourceReader.Doubles"; + } + } + + public static class IntsBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public 
IntsBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.ints(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new Ints(fetcher); + } + } + + private static class Ints extends BlockSourceReader { + Ints(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.IntBuilder) builder).appendInt(((Number) v).intValue()); + } + + @Override + public String toString() { + return "BlockSourceReader.Ints"; + } + } + + public static class LongsBlockLoader extends SourceBlockLoader { + private final ValueFetcher fetcher; + + public LongsBlockLoader(ValueFetcher fetcher) { + this.fetcher = fetcher; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.longs(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) { + return new Longs(fetcher); + } + } + + private static class Longs extends BlockSourceReader { + Longs(ValueFetcher fetcher) { + super(fetcher); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + ((BlockLoader.LongBuilder) builder).appendLong(((Number) v).longValue()); + } + + @Override + public String toString() { + return "BlockSourceReader.Longs"; + } + } + + /** + * Convert a {@link String} into a utf-8 {@link BytesRef}. + */ + static BytesRef toBytesRef(BytesRef scratch, String v) { + int len = UnicodeUtil.maxUTF8Length(v.length()); + if (scratch.bytes.length < len) { + scratch.bytes = new byte[len]; + } + scratch.length = UnicodeUtil.UTF16toUTF8(v, 0, v.length(), scratch.bytes); + return scratch; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java index 5984482fd9441..0a6cde773ff48 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java @@ -9,10 +9,11 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; -import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; import org.elasticsearch.index.mapper.BlockLoader.BytesRefBuilder; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import java.io.IOException; import java.util.List; @@ -27,86 +28,101 @@ * doc values because, for now, ESQL only knows how to load things in a doc values * order. 
*/ -public abstract class BlockStoredFieldsReader extends BlockDocValuesReader { - public static BlockLoader bytesRefsFromBytesRefs(String field) { - StoredFieldLoader loader = StoredFieldLoader.create(false, Set.of(field)); - return context -> new Bytes(loader.getLoader(context, null), field) { - @Override - protected BytesRef toBytesRef(Object v) { - return (BytesRef) v; - } - }; +public abstract class BlockStoredFieldsReader implements BlockLoader.RowStrideReader { + @Override + public boolean canReuse(int startingDocID) { + return true; } - public static BlockLoader bytesRefsFromStrings(String field) { - StoredFieldLoader loader = StoredFieldLoader.create(false, Set.of(field)); - return context -> new Bytes(loader.getLoader(context, null), field) { - private final BytesRef scratch = new BytesRef(); + private abstract static class StoredFieldsBlockLoader implements BlockLoader { + protected final String field; - @Override - protected BytesRef toBytesRef(Object v) { - return toBytesRef(scratch, (String) v); - } - }; - } + StoredFieldsBlockLoader(String field) { + this.field = field; + } - public static BlockLoader id() { - StoredFieldLoader loader = StoredFieldLoader.create(false, Set.of(IdFieldMapper.NAME)); - return context -> new Id(loader.getLoader(context, null)); - } + @Override + public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return null; + } - private final LeafStoredFieldLoader loader; - private int docID = -1; + @Override + public final StoredFieldsSpec rowStrideStoredFieldSpec() { + return new StoredFieldsSpec(false, false, Set.of(field)); + } - protected BlockStoredFieldsReader(LeafStoredFieldLoader loader) { - this.loader = loader; - } + @Override + public final boolean supportsOrdinals() { + return false; + } - @Override - public final BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) throws IOException { - try (BlockLoader.Builder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - readValuesFromSingleDoc(docs.get(i), builder); - } - return builder.build(); + @Override + public final SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); } } - @Override - public final void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) throws IOException { - if (docId < this.docID) { - throw new IllegalStateException("docs within same block must be in order"); + /** + * Load {@link BytesRef} blocks from stored {@link BytesRef}s. + */ + public static class BytesFromBytesRefsBlockLoader extends StoredFieldsBlockLoader { + public BytesFromBytesRefsBlockLoader(String field) { + super(field); + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Bytes(field) { + @Override + protected BytesRef toBytesRef(Object v) { + return (BytesRef) v; + } + }; } - this.docID = docId; - loader.advanceTo(docId); - read(loader, builder); } - protected abstract void read(LeafStoredFieldLoader loader, BlockLoader.Builder builder) throws IOException; + /** + * Load {@link BytesRef} blocks from stored {@link String}s. 
+ */ + public static class BytesFromStringsBlockLoader extends StoredFieldsBlockLoader { + public BytesFromStringsBlockLoader(String field) { + super(field); + } - @Override - public final int docID() { - return docID; + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Bytes(field) { + private final BytesRef scratch = new BytesRef(); + + @Override + protected BytesRef toBytesRef(Object v) { + return BlockSourceReader.toBytesRef(scratch, (String) v); + } + }; + } } private abstract static class Bytes extends BlockStoredFieldsReader { private final String field; - Bytes(LeafStoredFieldLoader loader, String field) { - super(loader); + Bytes(String field) { this.field = field; } - @Override - public BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); - } - protected abstract BytesRef toBytesRef(Object v); @Override - protected void read(LeafStoredFieldLoader loader, BlockLoader.Builder builder) throws IOException { - List values = loader.storedFields().get(field); + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { + List values = storedFields.storedFields().get(field); if (values == null) { builder.appendNull(); return; @@ -128,21 +144,31 @@ public String toString() { } } - private static class Id extends BlockStoredFieldsReader { - private final BytesRef scratch = new BytesRef(); - - Id(LeafStoredFieldLoader loader) { - super(loader); + /** + * Load {@link BytesRef} blocks from stored {@link String}s. + */ + public static class IdBlockLoader extends StoredFieldsBlockLoader { + public IdBlockLoader() { + super(IdFieldMapper.NAME); } @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { + public Builder builder(BlockFactory factory, int expectedCount) { return factory.bytesRefs(expectedCount); } @Override - protected void read(LeafStoredFieldLoader loader, BlockLoader.Builder builder) throws IOException { - ((BytesRefBuilder) builder).appendBytesRef(toBytesRef(scratch, loader.id())); + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Id(); + } + } + + private static class Id extends BlockStoredFieldsReader { + private final BytesRef scratch = new BytesRef(); + + @Override + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { + ((BytesRefBuilder) builder).appendBytesRef(BlockSourceReader.toBytesRef(scratch, storedFields.id())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index a5793df3b82e0..7f175982dc28e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -257,9 +257,9 @@ public Boolean valueForDisplay(Object value) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - return BlockDocValuesReader.booleans(name()); + return new BlockDocValuesReader.BooleansBlockLoader(name()); } - return BlockSourceReader.booleans(sourceValueFetcher(blContext.sourcePaths(name()))); + return new 
BlockSourceReader.BooleansBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptBlockDocValuesReader.java index b59df56791fbe..953e13dc69eb0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.BooleanFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for {@code boolean} scripts. */ public class BooleanScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(BooleanFieldScript.LeafFactory factory) { - return context -> new BooleanScriptBlockDocValuesReader(factory.newInstance(context)); + static class BooleanScriptBlockLoader extends DocValuesBlockLoader { + private final BooleanFieldScript.LeafFactory factory; + + BooleanScriptBlockLoader(BooleanFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.booleans(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new BooleanScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final BooleanFieldScript script; @@ -26,19 +43,14 @@ public static BlockLoader blockLoader(BooleanFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.BooleanBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { // Note that we emit falses before trues so we conform to the doc values contract and can use booleansFromDocValues - return factory.booleansFromDocValues(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BooleanBuilder builder = builder(factory, docs.count())) { + try (BlockLoader.BooleanBuilder builder = factory.booleans(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -47,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.BooleanBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java index 6e3876644567f..749bb279cfed4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java @@ -112,7 +112,7 @@ public DocValueFormat docValueFormat(String format, ZoneId timeZone) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return
BooleanScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new BooleanScriptBlockDocValuesReader.BooleanScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 9d12fc6910d66..e90bea103c4cb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -775,9 +775,9 @@ public Function<byte[], Number> pointReaderIfPossible() { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - return BlockDocValuesReader.longs(name()); + return new BlockDocValuesReader.LongsBlockLoader(name()); } - return BlockSourceReader.longs(sourceValueFetcher(blContext.sourcePaths(name()))); + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptBlockDocValuesReader.java index ad630a71870a4..a5303f27573eb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.DateFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for date scripts. */ public class DateScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(DateFieldScript.LeafFactory factory) { - return context -> new DateScriptBlockDocValuesReader(factory.newInstance(context)); + static class DateScriptBlockLoader extends DocValuesBlockLoader { + private final DateFieldScript.LeafFactory factory; + + DateScriptBlockLoader(DateFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.longs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new DateScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final DateFieldScript script; @@ -26,18 +43,14 @@ public static BlockLoader blockLoader(DateFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.LongBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.longs(expectedCount); // Note that we don't pre-sort our output so we can't use longsFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.LongBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort the values, so we can't use factory.longsFromDocValues + try (BlockLoader.LongBuilder builder = factory.longs(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -46,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void
readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.LongBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java index 8252d571dce68..238f7488f6b54 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java @@ -181,7 +181,7 @@ public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return DateScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new DateScriptBlockDocValuesReader.DateScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 996c6243064e9..17af6259ca27c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -613,8 +613,12 @@ private static void postProcessDynamicArrayMapping(DocumentParserContext context || context.isCopyToField(fullFieldName) || mappers.size() < MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING || mappers.size() > MAX_DIMS_COUNT + // Anything that is NOT a number or anything that IS a number but not mapped to `float` should NOT be mapped to dense_vector || mappers.stream() - .allMatch(m -> m instanceof NumberFieldMapper.Builder nb && nb.type != NumberFieldMapper.NumberType.FLOAT)) { + .anyMatch( + m -> m instanceof NumberFieldMapper.Builder == false + || ((NumberFieldMapper.Builder) m).type != NumberFieldMapper.NumberType.FLOAT + )) { return; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptBlockDocValuesReader.java index 4e317a3ed11cb..a98f5ff661a78 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.DoubleFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for {@code double} scripts. 
*/ public class DoubleScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(DoubleFieldScript.LeafFactory factory) { - return context -> new DoubleScriptBlockDocValuesReader(factory.newInstance(context)); + static class DoubleScriptBlockLoader extends DocValuesBlockLoader { + private final DoubleFieldScript.LeafFactory factory; + + DoubleScriptBlockLoader(DoubleFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.doubles(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new DoubleScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final DoubleFieldScript script; @@ -26,18 +43,14 @@ public static BlockLoader blockLoader(DoubleFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.DoubleBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.doubles(expectedCount); // Note that we don't pre-sort our output so we can't use doublesFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.DoubleBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort the values, so we can't use factory.doublesFromDocValues + try (BlockLoader.DoubleBuilder builder = factory.doubles(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -46,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.DoubleBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java index ef5c112ef212a..c3f7e782c219a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java @@ -107,7 +107,7 @@ public DocValueFormat docValueFormat(String format, ZoneId timeZone) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return DoubleScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new DoubleScriptBlockDocValuesReader.DoubleScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index b9ba0762e5117..2b4eec2bdd565 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -204,4 +204,11 @@ Set<String> sourcePaths(String field) { return fieldToCopiedFields.containsKey(resolvedField) ? fieldToCopiedFields.get(resolvedField) : Set.of(resolvedField); } + + /** + * If the field is a leaf multi-field, return the path to the parent field. Otherwise, return null.
+ */ + public String parentField(String field) { + return fullSubfieldNameToParentPath.get(field); + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index 5f987fd96ca66..1b2667fe9d2ea 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -80,42 +80,7 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - // TODO build a constant block directly - BytesRef bytes = new BytesRef(blContext.indexName()); - return context -> new BlockDocValuesReader() { - private int docId; - - @Override - public int docID() { - return docId; - } - - @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { - for (int i = 0; i < docs.count(); i++) { - builder.appendBytesRef(bytes); - } - return builder.build(); - } - } - - @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { - this.docId = docId; - ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(bytes); - } - - @Override - public String toString() { - return "Index"; - } - }; + return BlockLoader.constantBytes(new BytesRef(blContext.indexName())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 80fd384f15fb7..56a50c2dee0aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -408,7 +408,7 @@ public static Query rangeQuery( @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - return BlockDocValuesReader.bytesRefsFromOrds(name()); + return new BlockDocValuesReader.BytesRefsFromOrdsBlockLoader(name()); } return null; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptBlockDocValuesReader.java index 23229a6533cdb..ff063555ff05d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.IpFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for IP scripts.
*/ public class IpScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(IpFieldScript.LeafFactory factory) { - return context -> new IpScriptBlockDocValuesReader(factory.newInstance(context)); + static class IpScriptBlockLoader extends DocValuesBlockLoader { + private final IpFieldScript.LeafFactory factory; + + IpScriptBlockLoader(IpFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new IpScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final IpFieldScript script; @@ -26,18 +43,14 @@ public static BlockLoader blockLoader(IpFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); // Note that we don't pre-sort our output so we can't use bytesRefsFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort our output so we can't use bytesRefsFromDocValues + try (BlockLoader.BytesRefBuilder builder = factory.bytesRefs(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -46,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; read(docId, (BlockLoader.BytesRefBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java index 0e56b30e2d5d9..4a64184d5d164 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java @@ -211,6 +211,6 @@ private Query cidrQuery(String term, SearchExecutionContext context) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return IpScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new IpScriptBlockDocValuesReader.IpScriptBlockLoader(leafFactory(blContext.lookup())); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index f15bb0069570f..b62113a586bba 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -580,7 +580,7 @@ NamedAnalyzer normalizer() { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { - return BlockDocValuesReader.bytesRefsFromOrds(name()); + return new BlockDocValuesReader.BytesRefsFromOrdsBlockLoader(name()); } if (isSyntheticSource) { if (false == isStored()) { @@ -590,9 +590,9 @@ public 
BlockLoader blockLoader(BlockLoaderContext blContext) { + "] is only supported in synthetic _source index if it creates doc values or stored fields" ); } - return BlockStoredFieldsReader.bytesRefsFromBytesRefs(name()); + return new BlockStoredFieldsReader.BytesFromBytesRefsBlockLoader(name()); } - return BlockSourceReader.bytesRefs(sourceValueFetcher(blContext.sourcePaths(name()))); + return new BlockSourceReader.BytesRefsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); } @Override @@ -822,6 +822,10 @@ public void validateMatchedRoutingPath(final String routingPath) { ); } } + + public boolean hasNormalizer() { + return normalizer != Lucene.KEYWORD_ANALYZER; + } } private final boolean indexed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptBlockDocValuesReader.java index 6afbcae50d31f..df5ba51755c2a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptBlockDocValuesReader.java @@ -8,15 +8,32 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.script.StringFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for keyword scripts. */ public class KeywordScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(StringFieldScript.LeafFactory factory) { - return context -> new KeywordScriptBlockDocValuesReader(factory.newInstance(context)); + static class KeywordScriptBlockLoader extends DocValuesBlockLoader { + private final StringFieldScript.LeafFactory factory; + + KeywordScriptBlockLoader(StringFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new KeywordScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final BytesRefBuilder bytesBuild = new BytesRefBuilder(); @@ -28,18 +45,14 @@ public static BlockLoader blockLoader(StringFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.BytesRefBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); // Note that we don't pre-sort our output so we can't use bytesRefsFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.BytesRefBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort our output so we can't use bytesRefsFromDocValues + try (BlockLoader.BytesRefBuilder builder = factory.bytesRefs(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -48,7 +61,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws 
IOException { this.docId = docId; read(docId, (BlockLoader.BytesRefBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java index 879a28d4c76c8..188f0ae508fcc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java @@ -112,7 +112,7 @@ public Object valueForDisplay(Object value) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return KeywordScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new KeywordScriptBlockDocValuesReader.KeywordScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptBlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptBlockDocValuesReader.java index 91c099cd2813b..73ad359147571 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptBlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptBlockDocValuesReader.java @@ -8,14 +8,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.LongFieldScript; +import java.io.IOException; + /** * {@link BlockDocValuesReader} implementation for {@code long} scripts. */ public class LongScriptBlockDocValuesReader extends BlockDocValuesReader { - public static BlockLoader blockLoader(LongFieldScript.LeafFactory factory) { - return context -> new LongScriptBlockDocValuesReader(factory.newInstance(context)); + static class LongScriptBlockLoader extends DocValuesBlockLoader { + private final LongFieldScript.LeafFactory factory; + + LongScriptBlockLoader(LongFieldScript.LeafFactory factory) { + this.factory = factory; + } + + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.longs(expectedCount); + } + + @Override + public AllReader reader(LeafReaderContext context) throws IOException { + return new LongScriptBlockDocValuesReader(factory.newInstance(context)); + } } private final LongFieldScript script; @@ -26,18 +43,14 @@ public static BlockLoader blockLoader(LongFieldScript.LeafFactory factory) { } @Override - public int docID() { + public int docId() { return docId; } @Override - public BlockLoader.LongBuilder builder(BlockLoader.BuilderFactory factory, int expectedCount) { - return factory.longs(expectedCount); // Note that we don't pre-sort our output so we can't use longsFromDocValues - } - - @Override - public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoader.Docs docs) { - try (BlockLoader.LongBuilder builder = builder(factory, docs.count())) { + public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { + // Note that we don't pre-sort our output so we can't use longsFromDocValues + try (BlockLoader.LongBuilder builder = factory.longs(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(docs.get(i), builder); } @@ -46,7 +59,7 @@ public BlockLoader.Block readValues(BlockLoader.BuilderFactory factory, BlockLoa } @Override - public void readValuesFromSingleDoc(int docId, BlockLoader.Builder builder) { + public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { this.docId = docId; 
read(docId, (BlockLoader.LongBuilder) builder); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java index f89babe32d0a9..f099ee3625922 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java @@ -107,7 +107,7 @@ public DocValueFormat docValueFormat(String format, ZoneId timeZone) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return LongScriptBlockDocValuesReader.blockLoader(leafFactory(blContext.lookup())); + return new LongScriptBlockDocValuesReader.LongScriptBlockLoader(leafFactory(blContext.lookup())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index b68bb1a2b1987..376cb1a10e2e6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -658,6 +658,11 @@ public interface BlockLoaderContext { * Find the paths in {@code _source} that contain values for the field named {@code name}. */ Set<String> sourcePaths(String name); + + /** + * If the field is a leaf multi-field, return the path to the parent field. Otherwise, return null. + */ + String parentField(String field); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index deaac37508511..cbf2dd872da2f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -707,7 +707,7 @@ public boolean isMultiField(String field) { */ public synchronized List<String> reloadSearchAnalyzers(AnalysisRegistry registry, @Nullable String resource, boolean preview) throws IOException { - logger.info("reloading search analyzers"); + logger.debug("reloading search analyzers for index [{}]", indexSettings.getIndex().getName()); // TODO this should bust the cache somehow. Tracked in https://github.com/elastic/elasticsearch/issues/66722 return indexAnalyzers.reload(registry, indexSettings, resource, preview); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 7c44f33fbafa5..4880ce5edc204 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -404,6 +404,13 @@ public Set<String> sourcePaths(String field) { return fieldTypesLookup().sourcePaths(field); } + + /** + * If the field is a leaf multi-field, return the path to the parent field. Otherwise, return null. + */ + public String parentField(String field) { + return fieldTypesLookup().parentField(field); + } + /** * Returns true if the index has mappings. An index does not have mappings only if it was created * without providing mappings explicitly, and no documents have yet been indexed in it.
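The reworked API above is easiest to see end to end. Here is a rough sketch of how a consumer is meant to drive it; all variables (fieldType, blContext, leafContext, blockFactory, sourceLoader, docIds) are hypothetical stand-ins, StoredFieldLoader.fromSpec and SourceLoader.leaf are assumed from the surrounding codebase, the real driver lives in the compute engine, and IOException handling is elided:

    // Resolve a loader for the field, then a row-stride reader for one leaf.
    BlockLoader loader = fieldType.blockLoader(blContext);
    BlockLoader.RowStrideReader reader = loader.rowStrideReader(leafContext);
    // The loader declares which stored fields it needs, e.g. StoredFieldsSpec.NEEDS_SOURCE.
    StoredFieldsSpec spec = loader.rowStrideStoredFieldSpec();
    // Stored fields and _source are positioned once per document and shared by every field being loaded.
    BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader(
        StoredFieldLoader.fromSpec(spec).getLoader(leafContext, null),
        spec.requiresSource() ? sourceLoader.leaf(leafContext.reader(), docIds) : null
    );
    try (BlockLoader.Builder builder = loader.builder(blockFactory, docIds.length)) {
        for (int doc : docIds) { // docs must be in non-decreasing order
            storedFields.advanceTo(doc);
            reader.read(doc, storedFields, builder);
        }
        BlockLoader.Block block = builder.build(); // Block now extends Releasable; the caller must close it
    }

Loaders that can read column-at-a-time (doc values) skip the stored-field machinery entirely: when columnAtATimeReader returns non-null, the consumer reads whole blocks of documents from that reader instead.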
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 84e9e84fb8ceb..091e3c61764b0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -440,12 +440,12 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.doubles(fieldName, l -> HalfFloatPoint.sortableShortToHalfFloat((short) l)); + return new BlockDocValuesReader.DoublesBlockLoader(fieldName, l -> HalfFloatPoint.sortableShortToHalfFloat((short) l)); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.doubles(sourceValueFetcher); + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); } }, FLOAT("float", NumericType.FLOAT) { @@ -602,12 +602,12 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.doubles(fieldName, l -> NumericUtils.sortableIntToFloat((int) l)); + return new BlockDocValuesReader.DoublesBlockLoader(fieldName, l -> NumericUtils.sortableIntToFloat((int) l)); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.doubles(sourceValueFetcher); + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); } }, DOUBLE("double", NumericType.DOUBLE) { @@ -742,12 +742,12 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.doubles(fieldName, NumericUtils::sortableLongToDouble); + return new BlockDocValuesReader.DoublesBlockLoader(fieldName, NumericUtils::sortableLongToDouble); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.doubles(sourceValueFetcher); + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); } }, BYTE("byte", NumericType.BYTE) { @@ -845,12 +845,12 @@ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.ints(fieldName); + return new BlockDocValuesReader.IntsBlockLoader(fieldName); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.ints(sourceValueFetcher); + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); } }, SHORT("short", NumericType.SHORT) { @@ -944,12 +944,12 @@ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.ints(fieldName); + return new BlockDocValuesReader.IntsBlockLoader(fieldName); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.ints(sourceValueFetcher); + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); } }, INTEGER("integer", NumericType.INT) { @@ -1111,12 +1111,12 @@ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.ints(fieldName); + return new 
BlockDocValuesReader.IntsBlockLoader(fieldName); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.ints(sourceValueFetcher); + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); } }, LONG("long", NumericType.LONG) { @@ -1248,12 +1248,12 @@ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String fieldName, String @Override BlockLoader blockLoaderFromDocValues(String fieldName) { - return BlockDocValuesReader.longs(fieldName); + return new BlockDocValuesReader.LongsBlockLoader(fieldName); } @Override BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return BlockSourceReader.longs(sourceValueFetcher); + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher); } }; @@ -1656,7 +1656,7 @@ public Function<byte[], Number> pointReaderIfPossible() { public BlockLoader blockLoader(BlockLoaderContext blContext) { if (indexMode == IndexMode.TIME_SERIES && metricType == TimeSeriesParams.MetricType.COUNTER) { // Counters are not supported by ESQL so we load them as null - return BlockDocValuesReader.nulls(); + return BlockLoader.CONSTANT_NULLS; } if (hasDocValues()) { return type.blockLoaderFromDocValues(name()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java index f681d54ebbead..d8a4177ee3211 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ProvidedIdFieldMapper.java @@ -119,7 +119,7 @@ public Query termsQuery(Collection<?> values, SearchExecutionContext context) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return BlockStoredFieldsReader.id(); + return new BlockStoredFieldsReader.IdBlockLoader(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java new file mode 100644 index 0000000000000..63455379044f7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; +import org.elasticsearch.search.fetch.StoredFieldsSpec; + +import java.io.IOException; +import java.util.Set; + +/** + * Load {@code _source} into blocks.
+ */ +public final class SourceFieldBlockLoader implements BlockLoader { + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return null; + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Source(); + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return new StoredFieldsSpec(true, false, Set.of()); + } + + @Override + public boolean supportsOrdinals() { + return false; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + + private static class Source extends BlockStoredFieldsReader { + @Override + public void read(int docId, StoredFields storedFields, Builder builder) throws IOException { + // TODO support appending BytesReference + ((BytesRefBuilder) builder).appendBytesRef(storedFields.source().internalSourceRef().toBytesRef()); + } + + @Override + public String toString() { + return "BlockStoredFieldsReader.Source"; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 42121147d7f09..958db80ae64c2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -192,9 +192,11 @@ private IndexMode getIndexMode() { ); static final class SourceFieldType extends MappedFieldType { + private final boolean enabled; private SourceFieldType(boolean enabled) { super(NAME, false, enabled, false, TextSearchInfo.NONE, Collections.emptyMap()); + this.enabled = enabled; } @Override @@ -216,6 +218,14 @@ public Query existsQuery(SearchExecutionContext context) { public Query termQuery(Object value, SearchExecutionContext context) { throw new QueryShardException(context, "The _source field is not searchable"); } + + @Override + public BlockLoader blockLoader(BlockLoaderContext blContext) { + if (enabled) { + return new SourceFieldBlockLoader(); + } + return BlockLoader.CONSTANT_NULLS; + } } // nullable for bwc reasons diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 1949249b9be2d..1ae0489173ce3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -678,6 +678,7 @@ public TextFieldType( super(name, indexed, stored, false, tsi, meta); fielddata = false; this.isSyntheticSource = isSyntheticSource; + // TODO block loader could use a "fast loading" delegate which isn't always the same - but frequently is. 
this.syntheticSourceDelegate = syntheticSourceDelegate; this.eagerGlobalOrdinals = eagerGlobalOrdinals; this.indexPhrases = indexPhrases; @@ -939,25 +940,46 @@ public boolean isAggregatable() { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { if (syntheticSourceDelegate != null) { - return syntheticSourceDelegate.blockLoader(blContext); + return new BlockLoader.Delegating(syntheticSourceDelegate.blockLoader(blContext)) { + @Override + protected String delegatingTo() { + return syntheticSourceDelegate.name(); + } + }; } - if (isSyntheticSource) { - if (isStored()) { - return BlockStoredFieldsReader.bytesRefsFromStrings(name()); + /* + * If this is a sub-text field, try to return the parent's loader. Text + * fields will always be slow to load and if the parent is exact then we + * should use that instead. + */ + String parentField = blContext.parentField(name()); + if (parentField != null) { + MappedFieldType parent = blContext.lookup().fieldType(parentField); + if (parent.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { + KeywordFieldMapper.KeywordFieldType kwd = (KeywordFieldMapper.KeywordFieldType) parent; + if (kwd.hasNormalizer() == false && (kwd.hasDocValues() || kwd.isStored())) { + return new BlockLoader.Delegating(kwd.blockLoader(blContext)) { + @Override + protected String delegatingTo() { + return kwd.name(); + } + }; + } } + } + if (isStored()) { + return new BlockStoredFieldsReader.BytesFromStringsBlockLoader(name()); + } + if (isSyntheticSource) { /* - * We *shouldn't fall to this exception. The mapping should be - * rejected because we've enabled synthetic source but not configured - * the index properly. But we give it a nice message anyway just in - * case. + * When we're in synthetic source mode we don't currently + * support text fields that are not stored and are not children + * of perfect keyword fields. We'd have to load from the parent + * field and then convert the result to a string.
*/ - throw new IllegalArgumentException( - "fetching values from a text field [" - + name() - + "] is supported because synthetic _source is enabled and we don't have a way to load the fields" - ); + return null; } - return BlockSourceReader.bytesRefs(SourceValueFetcher.toString(blContext.sourcePaths(name()))); + return new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(blContext.sourcePaths(name()))); } @Override @@ -1019,7 +1041,7 @@ protected BytesRef storedToBytesRef(Object stored) { throw new IllegalArgumentException( "fetching values from a text field [" + name() - + "] is supported because synthetic _source is enabled and we don't have a way to load the fields" + + "] is not supported because synthetic _source is enabled and we don't have a way to load the fields" ); } return new SourceValueFetcherSortedBinaryIndexFieldData.Builder( @@ -1034,6 +1056,10 @@ protected BytesRef storedToBytesRef(Object stored) { public boolean isSyntheticSource() { return isSyntheticSource; } + + KeywordFieldMapper.KeywordFieldType syntheticSourceDelegate() { + return syntheticSourceDelegate; + } } public static class ConstantScoreTextFieldType extends TextFieldType { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java index 9d43ef398feac..9245e78602eb7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java @@ -89,7 +89,7 @@ public Query termsQuery(Collection values, SearchExecutionContext context) { @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return BlockStoredFieldsReader.id(); + return new BlockStoredFieldsReader.IdBlockLoader(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index 54a44dd55caa4..8f69f6afe47db 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -56,7 +56,7 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - return BlockDocValuesReader.longs(name()); + return new BlockDocValuesReader.LongsBlockLoader(name()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java index 51be6290df657..c15adfb3be116 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java @@ -113,10 +113,6 @@ private static class Defaults { public static final int DEPTH_LIMIT = 20; } - private static FlattenedFieldMapper toType(FieldMapper in) { - return (FlattenedFieldMapper) in; - } - private static Builder builder(Mapper in) { return ((FlattenedFieldMapper) in).builder; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java index a79b37796bbe9..ba1e27dc1a0aa 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java @@ -188,10 +188,6 @@ private KeyValue(final String key, final String value) { this(value, new Prefix(key), new Suffix(key)); } - public Prefix prefix() { - return this.prefix; - } - public Suffix suffix() { return this.suffix; } diff --git a/server/src/main/java/org/elasticsearch/index/query/Operator.java b/server/src/main/java/org/elasticsearch/index/query/Operator.java index 45e7cbc76f891..20f843df04651 100644 --- a/server/src/main/java/org/elasticsearch/index/query/Operator.java +++ b/server/src/main/java/org/elasticsearch/index/query/Operator.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.CollectionUtils; import java.io.IOException; import java.util.Locale; @@ -48,9 +47,4 @@ public static Operator fromString(String op) { return valueOf(op.toUpperCase(Locale.ROOT)); } - private static IllegalArgumentException newOperatorException(String op) { - return new IllegalArgumentException( - "operator needs to be either " + CollectionUtils.arrayAsArrayList(values()) + ", but not [" + op + "]" - ); - } } diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index c4806dbd3a0a8..143dfe7fe6e9d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -334,6 +334,13 @@ public Set<String> sourcePath(String fullName) { return mappingLookup.sourcePaths(fullName); } + /** + * If the given field is a leaf multi-field, return the path to its parent field. Otherwise, return null. + */ + public String parentPath(String field) { + return mappingLookup.parentField(field); + } + /** * Will there be {@code _source}.
*/ diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java index 3a7f316de6f40..7f6465e9bce8a 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java @@ -9,14 +9,14 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.ElasticsearchClient; public class DeleteByQueryRequestBuilder extends AbstractBulkByScrollRequestBuilder<DeleteByQueryRequest, DeleteByQueryRequestBuilder> { public DeleteByQueryRequestBuilder(ElasticsearchClient client, ActionType<BulkByScrollResponse> action) { - this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE)); + this(client, action, new SearchRequestBuilder(client, TransportSearchAction.TYPE)); } private DeleteByQueryRequestBuilder(ElasticsearchClient client, ActionType<BulkByScrollResponse> action, SearchRequestBuilder search) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java index 68a0b948ef32d..e79d06ceba0b5 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java @@ -11,8 +11,8 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.ElasticsearchClient; public class ReindexRequestBuilder extends AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, ReindexRequestBuilder> { @@ -22,7 +22,7 @@ public ReindexRequestBuilder(ElasticsearchClient client, ActionType<BulkByScrollResponse> action) { this( client, action, - new SearchRequestBuilder(client, SearchAction.INSTANCE), + new SearchRequestBuilder(client, TransportSearchAction.TYPE), new IndexRequestBuilder(client, IndexAction.INSTANCE) ); } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java --- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java @@ ... @@ -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.TransportSearchAction; public class UpdateByQueryRequestBuilder extends AbstractBulkIndexByScrollRequestBuilder<UpdateByQueryRequest, UpdateByQueryRequestBuilder> { public UpdateByQueryRequestBuilder(ElasticsearchClient client, ActionType<BulkByScrollResponse> action) { - this(client, action, new SearchRequestBuilder(client, SearchAction.INSTANCE)); + this(client, action, new SearchRequestBuilder(client, TransportSearchAction.TYPE)); } private UpdateByQueryRequestBuilder(ElasticsearchClient client, ActionType<BulkByScrollResponse> action, SearchRequestBuilder search) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 2e39b13b34c78..0b3b15670ef78 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersions; @@ -466,13 +467,13 @@ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { synchronized (retentionLeasePersistenceLock) { retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); } + return
emptyIfNull(retentionLeases); + } - // TODO after backporting we expect this never to happen in 8.x, so adjust this to throw an exception instead. - assert Version.CURRENT.major <= 8 : "throw an exception instead of returning EMPTY on null"; - if (retentionLeases == null) { - return RetentionLeases.EMPTY; - } - return retentionLeases; + @UpdateForV9 + private static RetentionLeases emptyIfNull(RetentionLeases retentionLeases) { + // we expect never to see a null in 8.x, so adjust this to throw an exception from v9 onwards. + return retentionLeases == null ? RetentionLeases.EMPTY : retentionLeases; } private final Object retentionLeasePersistenceLock = new Object(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f4812f280f917..d7d67b3af159e 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -285,6 +285,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final LongSupplier relativeTimeInNanosSupplier; private volatile long startedRelativeTimeInNanos; private volatile long indexingTimeBeforeShardStartedInNanos; + private final SubscribableListener<Void> waitForEngineOrClosedShardListeners = new SubscribableListener<>(); // the translog keeps track of the GCP, but unpromotable shards have no translog so we need to track the GCP here instead private volatile long globalCheckPointIfUnpromotable; @@ -1392,22 +1393,38 @@ public BulkStats bulkStats() { * If false is returned, no flush happened. */ public boolean flush(FlushRequest request) { + PlainActionFuture<Boolean> future = new PlainActionFuture<>(); + flush(request, future); + return future.actionGet(); + } + + /** + * Executes the given flush request against the engine. + * + * @param request the flush request + * @param listener notified once full durability has been achieved: with false if waitIfOngoing==false + * and an ongoing flush is detected (in which case no flush happened), else with true + */ + public void flush(FlushRequest request, ActionListener<Boolean> listener) { final boolean waitIfOngoing = request.waitIfOngoing(); final boolean force = request.force(); logger.trace("flush with {}", request); - /* - * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under - * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer - * since we use Engine#writeIndexingBuffer for this now. - */ - verifyNotClosed(); - final long time = System.nanoTime(); - // TODO: Transition this method to async to support async flush - PlainActionFuture<Engine.FlushResult> future = PlainActionFuture.newFuture(); - getEngine().flush(force, waitIfOngoing, future); - Engine.FlushResult flushResult = future.actionGet(); - flushMetric.inc(System.nanoTime() - time); - return flushResult.flushPerformed(); + ActionListener.run(listener, l -> { + /* + * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under + * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer + * since we use Engine#writeIndexingBuffer for this now.
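The new boolean flush(FlushRequest) above is a thin sync-over-async adapter: it hands a future to the listener-based overload and blocks on it. A self-contained sketch of the same pattern, using CompletableFuture as a stand-in for PlainActionFuture:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ForkJoinPool;

// Hedged sketch: expose a blocking API as a thin wrapper over an async,
// listener-style API, mirroring flush(FlushRequest) delegating to
// flush(FlushRequest, ActionListener). CompletableFuture stands in for
// PlainActionFuture.
public class SyncOverAsync {
    // The async primitive: completes the future when the "flush" finishes.
    static void flushAsync(CompletableFuture<Boolean> listener) {
        ForkJoinPool.commonPool().execute(() -> listener.complete(true));
    }

    // The blocking adapter: create a future, hand it in as the listener,
    // then wait. join() plays the role of future.actionGet() in the diff.
    static boolean flushBlocking() {
        CompletableFuture<Boolean> future = new CompletableFuture<>();
        flushAsync(future);
        return future.join();
    }

    public static void main(String[] args) {
        System.out.println("flushed=" + flushBlocking());
    }
}
```

Keeping only one real implementation (the async one) is what lets callers such as flushOnIdle and afterWriteOperation further down pass a listener instead of blocking a flush thread.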
+ */ + verifyNotClosed(); + final long startTime = System.nanoTime(); + getEngine().flush( + force, + waitIfOngoing, + ActionListener.runBefore(l.map(Engine.FlushResult::flushPerformed), () -> flushMetric.inc(System.nanoTime() - startTime)) + ); + }); } /** @@ -1658,6 +1675,7 @@ public void close(String reason, boolean flushEngine) throws IOException { synchronized (mutex) { changeState(IndexShardState.CLOSED, reason); } + checkAndCallWaitForEngineOrClosedShardListeners(); } finally { final Engine engine = this.currentEngineReference.getAndSet(null); try { @@ -2004,7 +2022,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata). - final Engine newEngine = engineFactory.newReadWriteEngine(config); + final Engine newEngine = createEngine(config); onNewEngine(newEngine); currentEngineReference.set(newEngine); // We set active because we are now writing operations to the engine; this way, @@ -2016,6 +2034,23 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t onSettingsChanged(); assert assertSequenceNumbersInCommit(); recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); + checkAndCallWaitForEngineOrClosedShardListeners(); + } + + // awful hack to work around problem in CloseFollowerIndexIT + static boolean suppressCreateEngineErrors; + + private Engine createEngine(EngineConfig config) { + if (suppressCreateEngineErrors) { + try { + return engineFactory.newReadWriteEngine(config); + } catch (Error e) { + ExceptionsHelper.maybeDieOnAnotherThread(e); + throw new RuntimeException("rethrowing suppressed error", e); + } + } else { + return engineFactory.newReadWriteEngine(config); + } } private boolean assertSequenceNumbersInCommit() throws IOException { @@ -2272,25 +2307,26 @@ public void flushOnIdle(long inactiveTimeNS) { boolean wasActive = active.getAndSet(false); if (wasActive) { logger.debug("flushing shard on inactive"); - threadPool.executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (state != IndexShardState.CLOSED) { - active.set(true); - logger.warn("failed to flush shard on inactive", e); + threadPool.executor(ThreadPool.Names.FLUSH) + .execute(() -> flush(new FlushRequest().waitIfOngoing(false).force(false), new ActionListener<>() { + @Override + public void onResponse(Boolean flushed) { + if (flushed == false) { + // In case an ongoing flush was detected, revert active flag so that a next flushOnIdle request + // will retry (#87888) + active.set(true); + } + periodicFlushMetric.inc(); } - } - @Override - protected void doRun() { - if (flush(new FlushRequest().waitIfOngoing(false).force(false)) == false) { - // In case an ongoing flush was detected, revert active flag so that a next flushOnIdle request - // will retry (#87888) - active.set(true); + @Override + public void onFailure(Exception e) { + if (state != IndexShardState.CLOSED) { + active.set(true); + logger.warn("failed to flush shard on inactive", e); + } } - periodicFlushMetric.inc(); - } - }); + })); } } } @@ -3737,27 +3773,23 @@ public void afterWriteOperation() { */ if (shouldPeriodicallyFlush()) { logger.debug("submitting async flush request"); - final AbstractRunnable flush = new AbstractRunnable() { - @Override - public void onFailure(final Exception e) { - if (state != IndexShardState.CLOSED) { - 
logger.warn("failed to flush index", e); + threadPool.executor(ThreadPool.Names.FLUSH).execute(() -> { + flush(new FlushRequest(), new ActionListener<>() { + @Override + public void onResponse(Boolean flushed) { + periodicFlushMetric.inc(); } - } - - @Override - protected void doRun() { - flush(new FlushRequest()); - periodicFlushMetric.inc(); - } - @Override - public void onAfter() { - flushOrRollRunning.compareAndSet(true, false); - afterWriteOperation(); - } - }; - threadPool.executor(ThreadPool.Names.FLUSH).execute(flush); + @Override + public void onFailure(Exception e) { + if (state != IndexShardState.CLOSED) { + logger.warn("failed to flush index", e); + } + } + }); + flushOrRollRunning.compareAndSet(true, false); + afterWriteOperation(); + }); } else if (shouldRollTranslogGeneration()) { logger.debug("submitting async roll translog generation request"); final AbstractRunnable roll = new AbstractRunnable() { @@ -3820,16 +3852,18 @@ && isSearchIdle() // lets skip this refresh since we are search idle and // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will // cause the next schedule to refresh. + logger.trace("scheduledRefresh: search-idle, skipping refresh"); engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some setRefreshPending(engine); l.onResponse(false); return; } else { - logger.trace("refresh with source [schedule]"); + logger.trace("scheduledRefresh: refresh with source [schedule]"); engine.maybeRefresh("schedule", l.map(Engine.RefreshResult::refreshed)); return; } } + logger.trace("scheduledRefresh: no refresh needed"); engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some l.onResponse(false); }); @@ -3925,7 +3959,7 @@ public final void ensureShardSearchActive(Consumer listener) { // a refresh can be a costly operation, so we should fork to a refresh thread to be safe: threadPool.executor(ThreadPool.Names.REFRESH).execute(() -> { if (location == pendingRefreshLocation.get()) { - getEngine().maybeRefresh("ensure-shard-search-active", PlainActionFuture.newFuture()); + getEngine().maybeRefresh("ensure-shard-search-active", new PlainActionFuture<>()); } }); } @@ -4181,10 +4215,28 @@ public void waitForSegmentGeneration(long segmentGeneration, ActionListener listener) { + waitForEngineOrClosedShardListeners.addListener(listener); + } + /** * Registers a listener for an event when the shard advances to the provided primary term and segment generation */ public void waitForPrimaryTermAndGeneration(long primaryTerm, long segmentGeneration, ActionListener listener) { - getEngine().addPrimaryTermAndGenerationListener(primaryTerm, segmentGeneration, listener); + waitForEngineOrClosedShard( + listener.delegateFailureAndWrap( + (l, ignored) -> getEngine().addPrimaryTermAndGenerationListener(primaryTerm, segmentGeneration, l) + ) + ); } + } diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index ded3ffa4ebcc0..bc5a4b02116a7 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -31,12 +31,12 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ListenableFuture; +import 
org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; @@ -48,6 +48,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -308,7 +309,7 @@ void recoverFromRepository(final IndexShard indexShard, Repository repository, A RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); assert recoveryType == RecoverySource.Type.SNAPSHOT : "expected snapshot recovery type: " + recoveryType; SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) indexShard.recoveryState().getRecoverySource(); - restore(indexShard, repository, recoverySource, recoveryListener(indexShard, listener)); + restore(indexShard, repository, recoverySource, recoveryListener(indexShard, listener).map(ignored -> true)); } else { listener.onResponse(false); } @@ -408,15 +409,19 @@ private ActionListener recoveryListener(IndexShard indexShard, ActionLi * Recovers the state of the shard from the store. */ private void internalRecoverFromStore(IndexShard indexShard, ActionListener outerListener) { - indexShard.preRecovery(outerListener.delegateFailureAndWrap((listener, ignored) -> { - final RecoveryState recoveryState = indexShard.recoveryState(); - final boolean indexShouldExists = recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE; - indexShard.prepareForIndexRecovery(); - SegmentInfos si = null; - final Store store = indexShard.store(); - store.incRef(); - boolean triggeredPostRecovery = false; - try { + final List releasables = new ArrayList<>(1); + SubscribableListener + + .newForked(indexShard::preRecovery) + + .andThen((l, ignored) -> { + final RecoveryState recoveryState = indexShard.recoveryState(); + final boolean indexShouldExists = recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE; + indexShard.prepareForIndexRecovery(); + SegmentInfos si = null; + final Store store = indexShard.store(); + store.incRef(); + releasables.add(store::decRef); try { store.failIfCorrupted(); try { @@ -480,16 +485,16 @@ private void internalRecoverFromStore(IndexShard indexShard, ActionListener { + if (e instanceof IndexShardRecoveryException) { + l.onFailure(e); + } else { + l.onFailure(new IndexShardRecoveryException(shardId, "failed to recover from gateway", e)); } - } - })); + }), () -> Releasables.close(releasables))); } private static void writeEmptyRetentionLeasesFile(IndexShard indexShard) throws IOException { @@ -513,31 +518,24 @@ private void restore( IndexShard indexShard, Repository repository, SnapshotRecoverySource restoreSource, - ActionListener outerListener + ActionListener outerListener ) { logger.debug("restoring from {} ...", indexShard.recoveryState().getRecoverySource()); - indexShard.preRecovery(outerListener.delegateFailure((listener, ignored) -> { - final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog(); - if (restoreSource == null) { - listener.onFailure(new IndexShardRestoreFailedException(shardId, "empty restore source")); - return; - } 
- if (logger.isTraceEnabled()) { - logger.trace("[{}] restoring shard [{}]", restoreSource.snapshot(), shardId); - } - final ActionListener restoreListener = ActionListener.wrap(v -> { - indexShard.getIndexEventListener().afterFilesRestoredFromRepository(indexShard); - final Store store = indexShard.store(); - bootstrap(indexShard, store); - assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; - writeEmptyRetentionLeasesFile(indexShard); - indexShard.openEngineAndRecoverFromTranslog(); - indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); - indexShard.finalizeRecovery(); - indexShard.postRecovery("restore done", listener.map(voidValue -> true)); - }, e -> listener.onFailure(new IndexShardRestoreFailedException(shardId, "restore failed", e))); - try { + record ShardAndIndexIds(IndexId indexId, ShardId shardId) {} + + SubscribableListener + + .newForked(indexShard::preRecovery) + + .andThen((shardAndIndexIdsListener, ignored) -> { + final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog(); + if (restoreSource == null) { + throw new IndexShardRestoreFailedException(shardId, "empty restore source"); + } + if (logger.isTraceEnabled()) { + logger.trace("[{}] restoring shard [{}]", restoreSource.snapshot(), shardId); + } translogState.totalOperations(0); translogState.totalOperationsOnStart(0); indexShard.prepareForIndexRecovery(); @@ -548,37 +546,56 @@ private void restore( } else { snapshotShardId = new ShardId(indexId.getName(), IndexMetadata.INDEX_UUID_NA_VALUE, shardId.id()); } - final ListenableFuture indexIdListener = new ListenableFuture<>(); - // If the index UUID was not found in the recovery source we will have to load RepositoryData and resolve it by index name if (indexId.getId().equals(IndexMetadata.INDEX_UUID_NA_VALUE)) { - // BwC path, running against an old version master that did not add the IndexId to the recovery source + // BwC path, running against an old version master that did not add the IndexId to the recovery source. 
If the index + // UUID was not found in the recovery source we will have to load RepositoryData and resolve it by index name repository.getRepositoryData( // TODO no need to fork back to GENERIC if using cached repo data, see #101445 EsExecutors.DIRECT_EXECUTOR_SERVICE, new ThreadedActionListener<>( indexShard.getThreadPool().generic(), - indexIdListener.map(repositoryData -> repositoryData.resolveIndexId(indexId.getName())) + shardAndIndexIdsListener.map( + repositoryData -> new ShardAndIndexIds(repositoryData.resolveIndexId(indexId.getName()), snapshotShardId) + ) ) ); } else { - indexIdListener.onResponse(indexId); + shardAndIndexIdsListener.onResponse(new ShardAndIndexIds(indexId, snapshotShardId)); } + }) + + .andThen((restoreListener, shardAndIndexId) -> { assert indexShard.getEngineOrNull() == null; - indexIdListener.addListener(restoreListener.delegateFailureAndWrap((l, idx) -> { - assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC, ThreadPool.Names.SNAPSHOT); - repository.restoreShard( - indexShard.store(), - restoreSource.snapshot().getSnapshotId(), - idx, - snapshotShardId, - indexShard.recoveryState(), - l - ); - })); - } catch (Exception e) { - restoreListener.onFailure(e); - } - })); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC, ThreadPool.Names.SNAPSHOT); + repository.restoreShard( + indexShard.store(), + restoreSource.snapshot().getSnapshotId(), + shardAndIndexId.indexId(), + shardAndIndexId.shardId(), + indexShard.recoveryState(), + restoreListener + ); + }) + + .andThen((l, ignored) -> { + indexShard.getIndexEventListener().afterFilesRestoredFromRepository(indexShard); + final Store store = indexShard.store(); + bootstrap(indexShard, store); + assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; + writeEmptyRetentionLeasesFile(indexShard); + indexShard.openEngineAndRecoverFromTranslog(); + indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); + indexShard.finalizeRecovery(); + indexShard.postRecovery("restore done", l); + }) + + .addListener(outerListener.delegateResponse((l, e) -> { + if (e instanceof IndexShardRestoreFailedException) { + l.onFailure(e); + } else { + l.onFailure(new IndexShardRestoreFailedException(shardId, "restore failed", e)); + } + })); } public static void bootstrap(final IndexShard indexShard, final Store store) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index 140c4684d1a70..451af25dfa649 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -61,7 +61,16 @@ public enum Stage { * where an abort could have occurred. */ public enum AbortStatus { + /** + * The shard snapshot got past the stage where an abort or pause could have occurred, and is either complete or on its way to + * completion. + */ NO_ABORT, + + /** + * The shard snapshot stopped before completion, either because the whole snapshot was aborted or because this node is to be + * removed. 
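The restore(...) rewrite above replaces nested callbacks with a SubscribableListener chain: fork the first step, pipe each result into the next with andThen, and translate failures in one place at the end. A hedged analogue of that control flow using CompletableFuture (the step names are illustrative):

```java
import java.util.concurrent.CompletableFuture;

// Hedged sketch: sequential async steps chained fluently, with one failure
// handler at the end, mirroring the SubscribableListener chain in
// StoreRecovery#restore. Names are illustrative.
public class AsyncStepChain {
    record ShardAndIndexIds(String indexId, int shardId) {}

    public static void main(String[] args) {
        CompletableFuture
            .runAsync(() -> System.out.println("preRecovery"))
            // resolve ids, possibly via an extra async hop (the BwC path above)
            .thenCompose(ignored -> CompletableFuture.supplyAsync(() -> new ShardAndIndexIds("idx-uuid", 0)))
            .thenAccept(ids -> System.out.println("restore shard " + ids))
            .whenComplete((ignored, e) -> {
                // single place to wrap failures, like delegateResponse(...) above
                if (e != null) {
                    System.out.println("restore failed: " + e.getMessage());
                } else {
                    System.out.println("restore done");
                }
            })
            .join();
    }
}
```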
+ */ ABORTED } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java b/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java new file mode 100644 index 0000000000000..7fb52bcd0be1c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +public class IndicesFeatures implements FeatureSpecification { + @Override + public Map getHistoricalFeatures() { + return Map.of(IndicesService.SUPPORTS_AUTO_PUT, Version.V_8_8_0); + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index bcd5b6015df51..0faa66a9d21da 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -17,7 +17,6 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; @@ -75,6 +74,8 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.Index; @@ -137,7 +138,6 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -207,6 +207,8 @@ public class IndicesService extends AbstractLifecycleComponent Setting.Property.NodeScope ); + static final NodeFeature SUPPORTS_AUTO_PUT = new NodeFeature("indices.auto_put_supported"); + /** * The node's settings. 
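IndicesFeatures above registers SUPPORTS_AUTO_PUT as a historical feature implied by version 8.8.0, and the createShard change that follows swaps a minimum-node-version comparison for featureService.clusterHasFeature(...). A hedged, self-contained model of what such a cluster-wide feature check boils down to (the real FeatureService derives this from node metadata, not from a bare version list):

```java
import java.util.List;
import java.util.Map;

// Hedged sketch: gate behaviour on a cluster-wide feature rather than a raw
// version comparison. All types here model the idea only.
public class FeatureCheckSketch {
    record NodeFeature(String id) {}

    // A historical feature is implied by a node version, as in
    // IndicesFeatures#getHistoricalFeatures.
    static final NodeFeature SUPPORTS_AUTO_PUT = new NodeFeature("indices.auto_put_supported");
    static final Map<NodeFeature, Integer> HISTORICAL = Map.of(SUPPORTS_AUTO_PUT, 8_08_00); // 8.8.0 as an int

    static boolean clusterHasFeature(List<Integer> nodeVersions, NodeFeature feature) {
        int introducedIn = HISTORICAL.get(feature);
        // every node must be at least the version that introduced the feature
        return nodeVersions.stream().allMatch(v -> v >= introducedIn);
    }

    public static void main(String[] args) {
        System.out.println(clusterHasFeature(List.of(8_08_00, 8_11_00), SUPPORTS_AUTO_PUT)); // true
        System.out.println(clusterHasFeature(List.of(8_07_00, 8_11_00), SUPPORTS_AUTO_PUT)); // false
    }
}
```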
*/ @@ -226,6 +228,7 @@ public class IndicesService extends AbstractLifecycleComponent private final ScriptService scriptService; private final ClusterService clusterService; private final Client client; + private final FeatureService featureService; private volatile Map indices = Map.of(); private final Map> pendingDeletes = new HashMap<>(); private final AtomicInteger numUncompletedDeletes = new AtomicInteger(); @@ -268,59 +271,35 @@ protected void doStart() { } @SuppressWarnings("this-escape") - public IndicesService( - Settings settings, - PluginsService pluginsService, - NodeEnvironment nodeEnv, - NamedXContentRegistry xContentRegistry, - AnalysisRegistry analysisRegistry, - IndexNameExpressionResolver indexNameExpressionResolver, - MapperRegistry mapperRegistry, - NamedWriteableRegistry namedWriteableRegistry, - ThreadPool threadPool, - IndexScopedSettings indexScopedSettings, - CircuitBreakerService circuitBreakerService, - BigArrays bigArrays, - ScriptService scriptService, - ClusterService clusterService, - Client client, - MetaStateService metaStateService, - Collection>> engineFactoryProviders, - Map directoryFactories, - ValuesSourceRegistry valuesSourceRegistry, - Map recoveryStateFactories, - List indexFoldersDeletionListeners, - Map snapshotCommitSuppliers, - CheckedBiConsumer requestCacheKeyDifferentiator, - Supplier documentParsingObserverSupplier - ) { - this.settings = settings; - this.threadPool = threadPool; - this.pluginsService = pluginsService; - this.nodeEnv = nodeEnv; + IndicesService(IndicesServiceBuilder builder) { + this.settings = builder.settings; + this.threadPool = builder.threadPool; + this.pluginsService = builder.pluginsService; + this.nodeEnv = builder.nodeEnv; this.parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE) - .withRegistry(xContentRegistry); - this.valuesSourceRegistry = valuesSourceRegistry; + .withRegistry(builder.xContentRegistry); + this.valuesSourceRegistry = builder.valuesSourceRegistry; this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS)); - this.analysisRegistry = analysisRegistry; - this.indexNameExpressionResolver = indexNameExpressionResolver; + this.analysisRegistry = builder.analysisRegistry; + this.indexNameExpressionResolver = builder.indexNameExpressionResolver; this.indicesRequestCache = new IndicesRequestCache(settings); this.indicesQueryCache = new IndicesQueryCache(settings); - this.mapperRegistry = mapperRegistry; - this.namedWriteableRegistry = namedWriteableRegistry; - this.documentParsingObserverSupplier = documentParsingObserverSupplier; + this.mapperRegistry = builder.mapperRegistry; + this.namedWriteableRegistry = builder.namedWriteableRegistry; + this.documentParsingObserverSupplier = builder.documentParsingObserverSupplier; indexingMemoryController = new IndexingMemoryController( settings, threadPool, // ensure we pull an iter with new shards - flatten makes a copy () -> Iterables.flatten(this).iterator() ); - this.indexScopedSettings = indexScopedSettings; - this.circuitBreakerService = circuitBreakerService; - this.bigArrays = bigArrays; - this.scriptService = scriptService; - this.clusterService = clusterService; - this.client = client; + this.indexScopedSettings = builder.indexScopedSettings; + this.circuitBreakerService = builder.circuitBreakerService; + this.bigArrays = builder.bigArrays; + this.scriptService = builder.scriptService; + this.clusterService = builder.clusterService; + this.client = 
builder.client; + this.featureService = builder.featureService; this.idFieldDataEnabled = INDICES_ID_FIELD_DATA_ENABLED_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(INDICES_ID_FIELD_DATA_ENABLED_SETTING, this::setIdFieldDataEnabled); this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { @@ -336,21 +315,21 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon }); this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings); this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, threadPool, this.cleanInterval); - this.metaStateService = metaStateService; - this.engineFactoryProviders = engineFactoryProviders; + this.metaStateService = builder.metaStateService; + this.engineFactoryProviders = builder.engineFactoryProviders; // do not allow any plugin-provided index store type to conflict with a built-in type - for (final String indexStoreType : directoryFactories.keySet()) { + for (final String indexStoreType : builder.directoryFactories.keySet()) { if (IndexModule.isBuiltinType(indexStoreType)) { throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); } } - this.directoryFactories = directoryFactories; - this.recoveryStateFactories = recoveryStateFactories; - this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(indexFoldersDeletionListeners); - this.snapshotCommitSuppliers = snapshotCommitSuppliers; - this.requestCacheKeyDifferentiator = requestCacheKeyDifferentiator; + this.directoryFactories = builder.directoryFactories; + this.recoveryStateFactories = builder.recoveryStateFactories; + this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(builder.indexFoldersDeletionListeners); + this.snapshotCommitSuppliers = builder.snapshotCommitSuppliers; + this.requestCacheKeyDifferentiator = builder.requestCacheKeyDifferentiator; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -903,7 +882,7 @@ public void createShard( assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "mapping update consumer only required by local shards recovery"; client.execute( - clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.V_8_8_0) + featureService.clusterHasFeature(clusterService.state(), SUPPORTS_AUTO_PUT) ? AutoPutMappingAction.INSTANCE : PutMappingAction.INSTANCE, new PutMappingRequest().setConcreteIndex(shardRouting.index()) diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java new file mode 100644 index 0000000000000..a5cd00bb86094 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java @@ -0,0 +1,232 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.gateway.MetaStateService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.mapper.MapperRegistry; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.internal.DocumentParsingObserver; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class IndicesServiceBuilder { + Settings settings; + PluginsService pluginsService; + NodeEnvironment nodeEnv; + NamedXContentRegistry xContentRegistry; + AnalysisRegistry analysisRegistry; + IndexNameExpressionResolver indexNameExpressionResolver; + MapperRegistry mapperRegistry; + NamedWriteableRegistry namedWriteableRegistry; + ThreadPool threadPool; + IndexScopedSettings indexScopedSettings; + CircuitBreakerService circuitBreakerService; + BigArrays bigArrays; + ScriptService scriptService; + ClusterService clusterService; + Client client; + FeatureService featureService; + MetaStateService metaStateService; + Collection>> engineFactoryProviders = List.of(); + Map directoryFactories = Map.of(); + @Nullable + ValuesSourceRegistry valuesSourceRegistry; + Map recoveryStateFactories = Map.of(); + List indexFoldersDeletionListeners = List.of(); + Map snapshotCommitSuppliers = Map.of(); + @Nullable + CheckedBiConsumer requestCacheKeyDifferentiator; + Supplier documentParsingObserverSupplier; + + public IndicesServiceBuilder settings(Settings settings) { + this.settings = settings; + return this; + } + + public IndicesServiceBuilder pluginsService(PluginsService pluginsService) { + this.pluginsService = pluginsService; + return this; + } + + public IndicesServiceBuilder nodeEnvironment(NodeEnvironment nodeEnv) { + this.nodeEnv = nodeEnv; + return this; + } + + public IndicesServiceBuilder xContentRegistry(NamedXContentRegistry xContentRegistry) { + this.xContentRegistry = xContentRegistry; + return this; + } + + public IndicesServiceBuilder analysisRegistry(AnalysisRegistry analysisRegistry) { + this.analysisRegistry = analysisRegistry; + return this; + } + + public IndicesServiceBuilder 
indexNameExpressionResolver(IndexNameExpressionResolver indexNameExpressionResolver) { + this.indexNameExpressionResolver = indexNameExpressionResolver; + return this; + } + + public IndicesServiceBuilder mapperRegistry(MapperRegistry mapperRegistry) { + this.mapperRegistry = mapperRegistry; + return this; + } + + public IndicesServiceBuilder namedWriteableRegistry(NamedWriteableRegistry namedWriteableRegistry) { + this.namedWriteableRegistry = namedWriteableRegistry; + return this; + } + + public IndicesServiceBuilder threadPool(ThreadPool threadPool) { + this.threadPool = threadPool; + return this; + } + + public IndicesServiceBuilder indexScopedSettings(IndexScopedSettings indexScopedSettings) { + this.indexScopedSettings = indexScopedSettings; + return this; + } + + public IndicesServiceBuilder circuitBreakerService(CircuitBreakerService circuitBreakerService) { + this.circuitBreakerService = circuitBreakerService; + return this; + } + + public IndicesServiceBuilder bigArrays(BigArrays bigArrays) { + this.bigArrays = bigArrays; + return this; + } + + public IndicesServiceBuilder scriptService(ScriptService scriptService) { + this.scriptService = scriptService; + return this; + } + + public IndicesServiceBuilder clusterService(ClusterService clusterService) { + this.clusterService = clusterService; + return this; + } + + public IndicesServiceBuilder client(Client client) { + this.client = client; + return this; + } + + public IndicesServiceBuilder featureService(FeatureService featureService) { + this.featureService = featureService; + return this; + } + + public IndicesServiceBuilder metaStateService(MetaStateService metaStateService) { + this.metaStateService = metaStateService; + return this; + } + + public IndicesServiceBuilder valuesSourceRegistry(ValuesSourceRegistry valuesSourceRegistry) { + this.valuesSourceRegistry = valuesSourceRegistry; + return this; + } + + public IndicesServiceBuilder requestCacheKeyDifferentiator( + CheckedBiConsumer<ShardSearchRequest, StreamOutput, IOException> requestCacheKeyDifferentiator + ) { + this.requestCacheKeyDifferentiator = requestCacheKeyDifferentiator; + return this; + } + + public IndicesServiceBuilder documentParsingObserverSupplier(Supplier<DocumentParsingObserver> documentParsingObserverSupplier) { + this.documentParsingObserverSupplier = documentParsingObserverSupplier; + return this; + } + + public IndicesService build() { + Objects.requireNonNull(settings); + Objects.requireNonNull(pluginsService); + Objects.requireNonNull(nodeEnv); + Objects.requireNonNull(xContentRegistry); + Objects.requireNonNull(analysisRegistry); + Objects.requireNonNull(indexNameExpressionResolver); + Objects.requireNonNull(mapperRegistry); + Objects.requireNonNull(namedWriteableRegistry); + Objects.requireNonNull(threadPool); + Objects.requireNonNull(indexScopedSettings); + Objects.requireNonNull(circuitBreakerService); + Objects.requireNonNull(bigArrays); + Objects.requireNonNull(scriptService); + Objects.requireNonNull(clusterService); + Objects.requireNonNull(client); + Objects.requireNonNull(featureService); + Objects.requireNonNull(metaStateService); + Objects.requireNonNull(engineFactoryProviders); + Objects.requireNonNull(directoryFactories); + Objects.requireNonNull(recoveryStateFactories); + Objects.requireNonNull(indexFoldersDeletionListeners); + Objects.requireNonNull(snapshotCommitSuppliers); + Objects.requireNonNull(documentParsingObserverSupplier); + + // collect engine factory providers from plugins + engineFactoryProviders = pluginsService.filterPlugins(EnginePlugin.class) + .<Function<IndexSettings, Optional<EngineFactory>>>map(plugin ->
plugin::getEngineFactory) + .toList(); + + directoryFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getDirectoryFactories) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + recoveryStateFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getRecoveryStateFactories) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + indexFoldersDeletionListeners = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getIndexFoldersDeletionListeners) + .flatMap(List::stream) + .toList(); + + snapshotCommitSuppliers = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getSnapshotCommitSuppliers) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + return new IndicesService(this); + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index 56b0a07fcbc71..3ff760b753886 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -49,7 +50,6 @@ import java.util.Map.Entry; import java.util.Optional; import java.util.Set; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -530,7 +530,7 @@ public static IllegalArgumentException netNewSystemIndexAccessException(ThreadCo ); } else { return new IllegalArgumentException( - "Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + " use and access is reserved for system operations" + "Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + " may not be accessed by product [" + product + "]" ); } } @@ -929,7 +929,7 @@ public static void cleanUpFeature( Metadata metadata = clusterService.state().getMetadata(); final List exceptions = new ArrayList<>(); - final Consumer handleResponse = resetFeatureStateStatus -> { + final CheckedConsumer handleResponse = resetFeatureStateStatus -> { if (resetFeatureStateStatus.getStatus() == ResetFeatureStateStatus.Status.FAILURE) { synchronized (exceptions) { exceptions.add(resetFeatureStateStatus.getException()); diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java new file mode 100644 index 0000000000000..3e018385ccc7a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
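IndicesServiceBuilder above trades a two-dozen-argument constructor for named setters, validates every required dependency with Objects.requireNonNull in build(), and computes the plugin-derived collections at the same point. A minimal hedged sketch of that builder shape (all names illustrative):

```java
import java.util.List;
import java.util.Objects;

// Hedged sketch of the builder shape used by IndicesServiceBuilder: fluent
// setters, requireNonNull validation in build(), and derived collections
// computed at build time.
public class ServiceBuilderSketch {
    record Service(String settings, String threadPool, List<String> pluginFactories) {}

    static class Builder {
        String settings;
        String threadPool;
        List<String> plugins = List.of(); // optional, defaults to empty

        Builder settings(String settings) { this.settings = settings; return this; }
        Builder threadPool(String threadPool) { this.threadPool = threadPool; return this; }
        Builder plugins(List<String> plugins) { this.plugins = plugins; return this; }

        Service build() {
            // required dependencies fail fast, as in IndicesServiceBuilder#build
            Objects.requireNonNull(settings, "settings");
            Objects.requireNonNull(threadPool, "threadPool");
            // derive factories from plugins at build time, like the plugin scans above
            List<String> factories = plugins.stream().map(p -> p + "Factory").toList();
            return new Service(settings, threadPool, factories);
        }
    }

    public static void main(String[] args) {
        System.out.println(new Builder().settings("s").threadPool("tp").plugins(List.of("store")).build());
    }
}
```

Centralising the plugin scans in build() also means the service itself no longer needs to know how the factories were collected.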
+ */ + +package org.elasticsearch.indices.breaker; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * A class collecting trip counters for circuit breakers (parent, field data, request, in flight requests and custom child circuit + * breakers). + * + * The circuit breaker name is part of the (long) counter metric name instead of being an attribute because aggregating distinct circuit + * breakers' trip counter values does not make sense: summing, for instance, es.breaker.field_data.trip.total and + * es.breaker.in_flight_requests.trip.total. + * The counters trip for different reasons even if the underlying cause is "too much memory usage", and aggregating them together loses + * the ability to understand where the underlying issue is (too much field data, too many concurrent requests, or too-large concurrent + * requests?). Aggregating each one of them separately, for instance to get cluster level or cloud region level statistics, is perfectly + * fine, instead. + * + * NOTE: we also have the ability to register custom trip counters here, which a few plugins take advantage of nowadays. + * At the time of writing this class it is just "Eql" and "MachineLearning", which track memory used to store objects that are + * application/plugin specific, such as EQL sequence query objects and inference model objects. As a result, we have just a couple of + * these custom counters. This means we have 6 circuit breaker counter metrics per node (parent, field_data, request, in_flight_requests, + * eql_sequence and model_inference). We register them a bit differently to keep the ability for plugins to define their own circuit breaker + * trip counters.
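To make the naming scheme in the javadoc above concrete: each breaker's counter lives under its own templated metric name, es.breaker.&lt;name&gt;.trip.total, rather than under one shared counter with a breaker attribute. A hedged sketch using a plain map in place of the MeterRegistry:

```java
import java.util.HashMap;
import java.util.Map;

// Hedged sketch: one counter per breaker, keyed by a templated metric name,
// as opposed to a single counter with a "breaker" attribute. A plain map
// stands in for the MeterRegistry.
public class TripCounterNames {
    static final String TEMPLATE = "es.breaker.%s.trip.total";
    final Map<String, Long> counters = new HashMap<>();

    void register(String breakerName) {
        counters.putIfAbsent(TEMPLATE.formatted(breakerName), 0L);
    }

    void increment(String breakerName) {
        counters.merge(TEMPLATE.formatted(breakerName), 1L, Long::sum);
    }

    public static void main(String[] args) {
        TripCounterNames metrics = new TripCounterNames();
        metrics.register("field_data");
        metrics.register("eql_sequence"); // a plugin-defined breaker
        metrics.increment("field_data");
        System.out.println(metrics.counters);
    }
}
```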
+ */ +public class CircuitBreakerMetrics { + public static final CircuitBreakerMetrics NOOP = new CircuitBreakerMetrics(TelemetryProvider.NOOP, Collections.emptyMap()); + public static final String ES_BREAKER_PARENT_TRIP_COUNT_TOTAL = "es.breaker.parent.trip.total"; + public static final String ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL = "es.breaker.field_data.trip.total"; + public static final String ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL = "es.breaker.request.trip.total"; + public static final String ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL = "es.breaker.in_flight_requests.trip.total"; + + private static final String ES_BREAKER_CUSTOM_TRIP_COUNT_TOTAL_TEMPLATE = "es.breaker.%s.trip.total"; + private final MeterRegistry registry; + private final LongCounter parentTripCountTotal; + private final LongCounter fielddataTripCountTotal; + private final LongCounter requestTripCountTotal; + private final LongCounter inFlightRequestsCountTotal; + private final Map customTripCountsTotal; + + private CircuitBreakerMetrics( + final MeterRegistry registry, + final LongCounter parentTripCountTotal, + final LongCounter fielddataTripCountTotal, + final LongCounter requestTripCountTotal, + final LongCounter inFlightRequestsCountTotal, + final Map customTripCountsTotal + ) { + this.registry = registry; + this.parentTripCountTotal = parentTripCountTotal; + this.fielddataTripCountTotal = fielddataTripCountTotal; + this.requestTripCountTotal = requestTripCountTotal; + this.inFlightRequestsCountTotal = inFlightRequestsCountTotal; + this.customTripCountsTotal = customTripCountsTotal; + } + + public CircuitBreakerMetrics(final TelemetryProvider telemetryProvider, final Map customTripCounters) { + this( + telemetryProvider.getMeterRegistry(), + telemetryProvider.getMeterRegistry() + .registerLongCounter(ES_BREAKER_PARENT_TRIP_COUNT_TOTAL, "Parent circuit breaker trip count", "count"), + telemetryProvider.getMeterRegistry() + .registerLongCounter(ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL, "Field data circuit breaker trip count", "count"), + telemetryProvider.getMeterRegistry() + .registerLongCounter(ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL, "Request circuit breaker trip count", "count"), + telemetryProvider.getMeterRegistry() + .registerLongCounter( + ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL, + "In-flight requests circuit breaker trip count", + "count" + ), + customTripCounters + ); + } + + public LongCounter getParentTripCountTotal() { + return parentTripCountTotal; + } + + public LongCounter getFielddataTripCountTotal() { + return fielddataTripCountTotal; + } + + public LongCounter getRequestTripCountTotal() { + return requestTripCountTotal; + } + + public LongCounter getInFlightRequestsCountTotal() { + return inFlightRequestsCountTotal; + } + + public Map getCustomTripCountsTotal() { + return customTripCountsTotal; + } + + public LongCounter getCustomTripCount(final String name, final LongCounter theDefault) { + return this.customTripCountsTotal.getOrDefault(name, theDefault); + } + + public LongCounter getCustomTripCount(final String name) { + return this.customTripCountsTotal.getOrDefault(name, LongCounter.NOOP); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CircuitBreakerMetrics that = (CircuitBreakerMetrics) o; + return Objects.equals(registry, that.registry) + && Objects.equals(parentTripCountTotal, that.parentTripCountTotal) + && Objects.equals(fielddataTripCountTotal, that.fielddataTripCountTotal) + && 
Objects.equals(requestTripCountTotal, that.requestTripCountTotal) + && Objects.equals(inFlightRequestsCountTotal, that.inFlightRequestsCountTotal) + && Objects.equals(customTripCountsTotal, that.customTripCountsTotal); + } + + @Override + public int hashCode() { + return Objects.hash( + registry, + parentTripCountTotal, + fielddataTripCountTotal, + requestTripCountTotal, + inFlightRequestsCountTotal, + customTripCountsTotal + ); + } + + @Override + public String toString() { + return "CircuitBreakerMetrics{" + + "registry=" + + registry + + ", parentTripCountTotal=" + + parentTripCountTotal + + ", fielddataTripCountTotal=" + + fielddataTripCountTotal + + ", requestTripCountTotal=" + + requestTripCountTotal + + ", inFlightRequestsCountTotal=" + + inFlightRequestsCountTotal + + ", customTripCountsTotal=" + + customTripCountsTotal + + '}'; + } + + public void addCustomCircuitBreaker(final CircuitBreaker circuitBreaker) { + if (this.customTripCountsTotal.containsKey(circuitBreaker.getName())) { + throw new IllegalArgumentException("A circuit breaker named [" + circuitBreaker.getName() + "] already exists"); + } + final String canonicalName = Strings.format(ES_BREAKER_CUSTOM_TRIP_COUNT_TOTAL_TEMPLATE, circuitBreaker.getName()); + this.customTripCountsTotal.put( + canonicalName, + registry.registerLongCounter(canonicalName, "A custom circuit breaker [" + circuitBreaker.getName() + "]", "count") + ); + } + +} diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 86b6013895263..9e995c084a555 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -25,6 +25,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.GcNames; import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.telemetry.metric.LongCounter; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; @@ -141,17 +142,24 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { // Tripped count for when redistribution was attempted but wasn't successful private final AtomicLong parentTripCount = new AtomicLong(0); + private final LongCounter parentTripCountTotalMetric; private final Function<Boolean, OverLimitStrategy> overLimitStrategyFactory; private volatile OverLimitStrategy overLimitStrategy; @SuppressWarnings("this-escape") - public HierarchyCircuitBreakerService(Settings settings, List<BreakerSettings> customBreakers, ClusterSettings clusterSettings) { - this(settings, customBreakers, clusterSettings, HierarchyCircuitBreakerService::createOverLimitStrategy); + public HierarchyCircuitBreakerService( + CircuitBreakerMetrics metrics, + Settings settings, + List<BreakerSettings> customBreakers, + ClusterSettings clusterSettings + ) { + this(metrics, settings, customBreakers, clusterSettings, HierarchyCircuitBreakerService::createOverLimitStrategy); } @SuppressWarnings("this-escape") HierarchyCircuitBreakerService( + CircuitBreakerMetrics metrics, Settings settings, List<BreakerSettings> customBreakers, ClusterSettings clusterSettings, @@ -162,6 +170,7 @@ public HierarchyCircuitBreakerService(Settings settings, List c childCircuitBreakers.put( CircuitBreaker.FIELDDATA, validateAndCreateBreaker( + metrics.getFielddataTripCountTotal(), new BreakerSettings( CircuitBreaker.FIELDDATA,
                    FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
@@ -174,6 +183,7 @@ public HierarchyCircuitBreakerService(Settings settings, List<BreakerSettings> c
         childCircuitBreakers.put(
             CircuitBreaker.IN_FLIGHT_REQUESTS,
             validateAndCreateBreaker(
+                metrics.getInFlightRequestsCountTotal(),
                 new BreakerSettings(
                     CircuitBreaker.IN_FLIGHT_REQUESTS,
                     IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
@@ -186,6 +196,7 @@ public HierarchyCircuitBreakerService(Settings settings, List<BreakerSettings> c
         childCircuitBreakers.put(
             CircuitBreaker.REQUEST,
             validateAndCreateBreaker(
+                metrics.getRequestTripCountTotal(),
                 new BreakerSettings(
                     CircuitBreaker.REQUEST,
                     REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
@@ -203,7 +214,10 @@ public HierarchyCircuitBreakerService(Settings settings, List<BreakerSettings> c
                     + "] exists. Circuit breaker names must be unique"
                 );
             }
-            childCircuitBreakers.put(breakerSettings.getName(), validateAndCreateBreaker(breakerSettings));
+            childCircuitBreakers.put(
+                breakerSettings.getName(),
+                validateAndCreateBreaker(metrics.getCustomTripCount(breakerSettings.getName()), breakerSettings)
+            );
         }
         this.breakers = Map.copyOf(childCircuitBreakers);
         this.parentSettings = new BreakerSettings(
@@ -247,6 +261,7 @@ public HierarchyCircuitBreakerService(Settings settings, List<BreakerSettings> c
 
         this.overLimitStrategyFactory = overLimitStrategyFactory;
         this.overLimitStrategy = overLimitStrategyFactory.apply(this.trackRealMemoryUsage);
+        this.parentTripCountTotalMetric = metrics.getParentTripCountTotal();
     }
 
     private void updateCircuitBreakerSettings(String name, ByteSizeValue newLimit, Double newOverhead) {
@@ -399,6 +414,7 @@ public void checkParentLimit(long newBytesReserved, String label) throws Circuit
         long parentLimit = this.parentSettings.getLimit();
         if (memoryUsed.totalUsage > parentLimit && overLimitStrategy.overLimit(memoryUsed).totalUsage > parentLimit) {
             this.parentTripCount.incrementAndGet();
+            this.parentTripCountTotalMetric.increment();
             final String messageString = buildParentTripMessage(
                 newBytesReserved,
                 label,
@@ -474,12 +490,13 @@ static void appendBytesSafe(StringBuilder stringBuilder, long bytes) {
         }
     }
 
-    private CircuitBreaker validateAndCreateBreaker(BreakerSettings breakerSettings) {
+    private CircuitBreaker validateAndCreateBreaker(LongCounter trippedCountMeter, BreakerSettings breakerSettings) {
         // Validate the settings
         validateSettings(new BreakerSettings[] { breakerSettings });
         return breakerSettings.getType() == CircuitBreaker.Type.NOOP
             ? new NoopCircuitBreaker(breakerSettings.getName())
             : new ChildMemoryCircuitBreaker(
+                trippedCountMeter,
                 breakerSettings,
                 LogManager.getLogger(CHILD_LOGGER_PREFIX + breakerSettings.getName()),
                 this,
@@ -501,7 +518,7 @@ static OverLimitStrategy createOverLimitStrategy(boolean trackRealMemoryUsage) {
                 HierarchyCircuitBreakerService::realMemoryUsage,
                 createYoungGcCountSupplier(),
                 System::currentTimeMillis,
-                5000,
+                500,
                 lockTimeout
             );
         } else {
@@ -542,6 +559,8 @@ static class G1OverLimitStrategy implements OverLimitStrategy {
         private long blackHole;
         private final ReleasableLock lock = new ReleasableLock(new ReentrantLock());
+        // used to throttle logging
+        private int attemptNo;
 
         G1OverLimitStrategy(
             JvmInfo jvmInfo,
@@ -588,9 +607,12 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) {
             boolean leader = false;
             int allocationIndex = 0;
             long allocationDuration = 0;
+            long begin = 0;
+            int attemptNoCopy = 0;
             try (ReleasableLock locked = lock.tryAcquire(lockTimeout)) {
                 if (locked != null) {
-                    long begin = timeSupplier.getAsLong();
+                    attemptNoCopy = ++this.attemptNo;
+                    begin = timeSupplier.getAsLong();
                     leader = begin >= lastCheckTime + minimumInterval;
                     overLimitTriggered(leader);
                     if (leader) {
@@ -622,9 +644,11 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) {
                         long now = timeSupplier.getAsLong();
                         this.lastCheckTime = now;
                         allocationDuration = now - begin;
+                        this.attemptNo = 0;
                     }
                 }
             } catch (InterruptedException e) {
+                logger.info("could not acquire lock when attempting to trigger G1GC due to high heap usage");
                 Thread.currentThread().interrupt();
                 // fallthrough
             }
@@ -639,6 +663,13 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) {
                         allocationIndex,
                         allocationDuration
                     );
+                } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) {
+                    logger.info(
+                        "memory usage down after [{}], before [{}], after [{}]",
+                        begin - lastCheckTime,
+                        memoryUsed.baseUsage,
+                        current
+                    );
                 }
                 return new MemoryUsage(
                     current,
@@ -655,6 +686,13 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) {
                         allocationIndex,
                         allocationDuration
                     );
+                } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) {
+                    logger.info(
+                        "memory usage not down after [{}], before [{}], after [{}]",
+                        begin - lastCheckTime,
+                        memoryUsed.baseUsage,
+                        current
+                    );
                 }
                 // prefer original measurement when reporting if heap usage was not brought down.
                 return memoryUsed;
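The hunks above route a LongCounter from CircuitBreakerMetrics into every child breaker and into checkParentLimit, so a trip now bumps both the legacy per-breaker AtomicLong (surfaced via node stats) and a telemetry counter named es.breaker.<name>.trip.total. A minimal sketch of that double-counting pattern, assuming only the LongCounter interface shown in this patch (the class and field names below are illustrative, not part of the change):

import java.util.concurrent.atomic.AtomicLong;

import org.elasticsearch.telemetry.metric.LongCounter;

class TripAccounting {
    private final AtomicLong tripCount = new AtomicLong(0); // legacy stats counter
    private final LongCounter tripCountTotalMetric;         // telemetry counter, e.g. es.breaker.parent.trip.total

    TripAccounting(LongCounter tripCountTotalMetric) {
        this.tripCountTotalMetric = tripCountTotalMetric;
    }

    void onTrip() {
        tripCount.incrementAndGet();      // kept for the existing stats APIs
        tripCountTotalMetric.increment(); // published through the MeterRegistry
    }
}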
diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
index cf9378aabb993..c84a2fd343e8f 100644
--- a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
+++ b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
@@ -26,6 +26,7 @@
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
@@ -37,6 +38,7 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 import java.util.function.ToLongBiFunction;
 
 public final class IndicesFieldDataCache implements RemovalListener<IndicesFieldDataCache.Key, Accountable>, Releasable {
@@ -48,6 +50,11 @@ public final class IndicesFieldDataCache implements RemovalListener<IndicesFieldDataCache.Key, Accountable>, Releasable {
+    public static final Setting<TimeValue> INDICES_FIELDDATA_CACHE_EXPIRE = Setting.positiveTimeSetting(
+        "indices.fielddata.cache.expire",
+        new TimeValue(1, TimeUnit.HOURS),
+        Property.NodeScope
+    );
     private final IndexFieldDataCache.Listener indicesFieldDataCacheListener;
     private final Cache<Key, Accountable> cache;
@@ -58,6 +65,10 @@ public IndicesFieldDataCache(Settings settings, IndexFieldDataCache.Listener ind
         if (sizeInBytes > 0) {
             cacheBuilder.setMaximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
         }
+        final TimeValue expire = INDICES_FIELDDATA_CACHE_EXPIRE.get(settings);
+        if (expire != null && expire.getNanos() > 0) {
+            cacheBuilder.setExpireAfterAccess(expire);
+        }
         cache = cacheBuilder.build();
     }
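The change above registers indices.fielddata.cache.expire (default one hour) and applies it as an expire-after-access policy on the fielddata cache. A hedged sketch of how the setting resolves, using only the Settings and TimeValue APIs already present in the hunk (the 30m override is illustrative):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;

class FielddataExpireSketch {
    // positiveTimeSetting rejects values <= 0 at validation time, so the runtime
    // expire.getNanos() > 0 guard in the constructor is defensive rather than the
    // primary validation.
    static TimeValue resolveExpire() {
        Settings nodeSettings = Settings.builder()
            .put("indices.fielddata.cache.expire", "30m") // operator override; defaults to 1h
            .build();
        return IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_EXPIRE.get(nodeSettings);
    }
}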
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
index a570c88ddaba7..e6ec6f25a71a9 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
@@ -340,7 +340,9 @@ public void onFailure(Exception e) {
                 final var sendShardFailure =
                     // these indicate the source shard has already failed, which will independently notify the master and fail
                     // the target shard
-                    false == (cause instanceof ShardNotFoundException || cause instanceof IndexNotFoundException);
+                    false == (cause instanceof ShardNotFoundException
+                        || cause instanceof IndexNotFoundException
+                        || cause instanceof AlreadyClosedException);
                 // TODO retries? See RecoveryResponseHandler#handleException
                 onGoingRecoveries.failRecovery(
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
index 47405e0daa0a7..2e10a5de2d4e1 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
@@ -14,7 +14,6 @@
 import org.apache.lucene.store.RateLimiter.SimpleRateLimiter;
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.TransportVersions;
-import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodeRole;
 import org.elasticsearch.common.settings.ClusterSettings;
@@ -47,7 +46,6 @@
 import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING;
 
 public class RecoverySettings {
-    public static final Version SNAPSHOT_RECOVERIES_SUPPORTED_VERSION = Version.V_7_15_0;
     public static final IndexVersion SNAPSHOT_RECOVERIES_SUPPORTED_INDEX_VERSION = IndexVersions.V_7_15_0;
     public static final TransportVersion SNAPSHOT_RECOVERIES_SUPPORTED_TRANSPORT_VERSION = TransportVersions.V_7_15_0;
     public static final IndexVersion SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION = IndexVersions.V_7_16_0;
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java
index 07d62fb87fe55..e15ec4c339a94 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java
@@ -51,7 +51,6 @@
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.core.Strings.format;
-import static org.elasticsearch.indices.recovery.RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_VERSION;
 
 public class ShardSnapshotsService {
     private static final Logger logger = LogManager.getLogger(ShardSnapshotsService.class);
@@ -84,13 +83,8 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener fetchSnapshotFiles(ShardId shardId, GetShardSnap
         }
     }
 
-    protected boolean masterSupportsFetchingLatestSnapshots() {
-        return clusterService.state().nodes().getMinNodeVersion().onOrAfter(SNAPSHOT_RECOVERIES_SUPPORTED_VERSION);
-    }
-
     private static final class StoreFileMetadataDirectory extends Directory {
         private final Map<String, StoreFileMetadata> files;
diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java
index 2d7ee9f210e64..499cf5d5ca64f 100644
--- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java
+++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.action.ActionListener;
 
 import java.io.Closeable;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -61,7 +62,7 @@ public interface InferenceService extends Closeable {
      * @param taskSettings Settings in the request to override the model's defaults
      * @param listener Inference result listener
      */
-    void infer(Model model, String input, Map<String, Object> taskSettings, ActionListener<InferenceResults> listener);
+    void infer(Model model, List<String> input, Map<String, Object> taskSettings, ActionListener<InferenceServiceResults> listener);
 
     /**
      * Start or prepare the model for use.
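The infer signature above switches from a single String to a List<String>, letting services batch inputs in one round trip, with InferenceServiceResults (the new interface below) carrying the results. A hedged call-site sketch; the service, model, and listener values are placeholders rather than anything defined in this patch:

import java.util.List;
import java.util.Map;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.inference.InferenceService;
import org.elasticsearch.inference.InferenceServiceResults;
import org.elasticsearch.inference.Model;

class InferCallSiteSketch {
    // A former single-input caller now wraps its text in a singleton list; batch
    // callers pass all inputs at once and get one InferenceServiceResults back.
    static void inferBatch(InferenceService service, Model model, ActionListener<InferenceServiceResults> listener) {
        service.infer(model, List.of("first input", "second input"), Map.of(), listener);
    }
}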
diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java
new file mode 100644
index 0000000000000..37990caeec097
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.inference;
+
+import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.xcontent.ToXContentFragment;
+
+import java.util.List;
+import java.util.Map;
+
+public interface InferenceServiceResults extends NamedWriteable, ToXContentFragment {
+
+    /**
+     * Transform the result to match the format required for versions prior to
+     * {@link org.elasticsearch.TransportVersions#INFERENCE_SERVICE_RESULTS_ADDED}
+     */
+    List<? extends InferenceResults> transformToLegacyFormat();
+
+    /**
+     * Convert the result to a map to aid with test assertions
+     */
+    Map<String, Object> asMap();
+}
diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java
index 3adaab078ad4a..3a2a810dc61b5 100644
--- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java
+++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java
@@ -206,6 +206,22 @@ public IngestService(
         this.taskQueue = clusterService.createTaskQueue("ingest-pipelines", Priority.NORMAL, PIPELINE_TASK_EXECUTOR);
     }
 
+    /**
+     * This copy constructor returns a copy of the given ingestService, using all of the same internal state. The returned copy is not
+     * registered to listen to any cluster state changes.
+     * @param ingestService the ingest service to copy
+     */
+    IngestService(IngestService ingestService) {
+        this.clusterService = ingestService.clusterService;
+        this.scriptService = ingestService.scriptService;
+        this.documentParsingObserverSupplier = ingestService.documentParsingObserverSupplier;
+        this.processorFactories = ingestService.processorFactories;
+        this.threadPool = ingestService.threadPool;
+        this.taskQueue = ingestService.taskQueue;
+        this.pipelines = ingestService.pipelines;
+        this.state = ingestService.state;
+    }
+
     private static Map<String, Processor.Factory> processorFactories(List<IngestPlugin> ingestPlugins, Processor.Parameters parameters) {
         Map<String, Processor.Factory> processorFactories = new TreeMap<>();
         for (IngestPlugin ingestPlugin : ingestPlugins) {
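The package-private copy constructor above shares all of a live IngestService's state without registering another cluster-state listener; SimulateIngestService (added further below) builds on it to overlay request-scoped pipeline definitions. A hedged sketch of that decorator pattern with a hypothetical subclass:

package org.elasticsearch.ingest;

import java.util.Map;

// Hypothetical subclass, not part of the patch: answers pipeline lookups from an
// overlay map first and falls back to the copied service's cluster-state pipelines.
class OverlayIngestService extends IngestService {
    private final Map<String, Pipeline> overlay;

    OverlayIngestService(IngestService live, Map<String, Pipeline> overlay) {
        super(live); // copy constructor: same internal state, no listener registration
        this.overlay = overlay;
    }

    @Override
    public Pipeline getPipeline(String pipelineId) {
        Pipeline substitute = overlay.get(pipelineId);
        return substitute != null ? substitute : super.getPipeline(pipelineId);
    }
}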
diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestStats.java b/server/src/main/java/org/elasticsearch/ingest/IngestStats.java
index 51bbed8f7b09f..e197af5fbb46a 100644
--- a/server/src/main/java/org/elasticsearch/ingest/IngestStats.java
+++ b/server/src/main/java/org/elasticsearch/ingest/IngestStats.java
@@ -28,6 +28,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 
 public record IngestStats(Stats totalStats, List<PipelineStat> pipelineStats, Map<String, List<ProcessorStat>> processorStats)
     implements
@@ -266,6 +267,12 @@ public record ProcessorStat(String name, String type, Stats stats) {
             // both lists using a common index iterator.
         private static List<ProcessorStat> merge(List<ProcessorStat> first, List<ProcessorStat> second) {
             var merged = new ArrayList<ProcessorStat>();
+            assert first.size() == second.size()
+                : "stats size mismatch ["
+                    + first.stream().map(ps -> ps.name + ":" + ps.type).collect(Collectors.joining(","))
+                    + "] ["
+                    + second.stream().map(ps -> ps.name + ":" + ps.type).collect(Collectors.joining(","))
+                    + "]";
             for (var i = 0; i < first.size(); i++) {
                 merged.add(new ProcessorStat(first.get(i).name, first.get(i).type, Stats.merge(first.get(i).stats, second.get(i).stats)));
             }
diff --git a/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java b/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java
new file mode 100644
index 0000000000000..2f9da248b2afb
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/ingest/SimulateIngestService.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.ingest;
+
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.SimulateBulkRequest;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * This is an implementation of IngestService that allows us to substitute pipeline definitions so that users can simulate ingest using
+ * pipelines that they define on the fly.
+ */
+public class SimulateIngestService extends IngestService {
+    private final Map<String, Pipeline> pipelineSubstitutions;
+
+    public SimulateIngestService(IngestService ingestService, BulkRequest request) {
+        super(ingestService);
+        if (request instanceof SimulateBulkRequest simulateBulkRequest) {
+            try {
+                pipelineSubstitutions = getPipelineSubstitutions(simulateBulkRequest.getPipelineSubstitutions(), ingestService);
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        } else {
+            throw new IllegalArgumentException("Expecting a SimulateBulkRequest but got " + request.getClass());
+        }
+    }
+
+    /**
+     * This transforms the pipeline substitutions from a SimulateBulkRequest into a new map, where the key is the pipelineId and the
+     * value is the Pipeline instance. The Pipeline is created using the Processor.Factories and the ScriptService of the given
+     * ingestService.
+     * @param rawPipelineSubstitutions The pipeline substitutions map received from a SimulateBulkRequest
+     * @param ingestService The ingestService being used
+     * @return A transformed version of rawPipelineSubstitutions, where the values are Pipeline objects
+     * @throws Exception
+     */
+    private Map<String, Pipeline> getPipelineSubstitutions(
+        Map<String, Map<String, Object>> rawPipelineSubstitutions,
+        IngestService ingestService
+    ) throws Exception {
+        Map<String, Pipeline> parsedPipelineSubstitutions = new HashMap<>();
+        if (rawPipelineSubstitutions != null) {
+            for (Map.Entry<String, Map<String, Object>> entry : rawPipelineSubstitutions.entrySet()) {
+                String pipelineId = entry.getKey();
+                Pipeline pipeline = Pipeline.create(
+                    pipelineId,
+                    entry.getValue(),
+                    ingestService.getProcessorFactories(),
+                    ingestService.getScriptService()
+                );
+                parsedPipelineSubstitutions.put(pipelineId, pipeline);
+            }
+        }
+        return parsedPipelineSubstitutions;
+    }
+
+    /**
+     * This method returns the Pipeline for the given pipelineId. If a substitute definition of the pipeline has been defined for the
+     * current simulate, then that pipeline is returned.
Otherwise, the pipeline stored in the cluster state is returned. + */ + @Override + public Pipeline getPipeline(String pipelineId) { + Pipeline pipeline = pipelineSubstitutions.get(pipelineId); + if (pipeline == null) { + pipeline = super.getPipeline(pipelineId); + } + return pipeline; + } +} diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java index eb87dc982543f..6ae2f53a94ad8 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java @@ -48,7 +48,7 @@ public Snippet[] format(Passage[] passages, String content) { assert end > start; // Look ahead to expand 'end' past all overlapping: while (i + 1 < passage.getNumMatches() && passage.getMatchStarts()[i + 1] < end) { - end = passage.getMatchEnds()[++i]; + end = Math.max(passage.getMatchEnds()[++i], end); } end = Math.min(end, passage.getEndOffset()); // in case match straddles past passage diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index bd33a747b36fd..ca254e20e8b37 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -37,12 +37,9 @@ import org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.MasterHistoryService; -import org.elasticsearch.cluster.coordination.Reconfigurator; import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; -import org.elasticsearch.cluster.desirednodes.DesiredNodesSettingsValidator; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -64,6 +61,7 @@ import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Key; +import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationCategory; @@ -79,6 +77,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -101,18 +100,17 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.analysis.AnalysisRegistry; -import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.indices.ExecutorSelector; import 
org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.IndicesServiceBuilder; import org.elasticsearch.indices.ShardLimitValidator; import org.elasticsearch.indices.SystemIndexMappingUpdateService; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.analysis.AnalysisModule; -import org.elasticsearch.indices.breaker.BreakerSettings; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -140,9 +138,7 @@ import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.HealthPlugin; -import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.InferenceServicePlugin; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; @@ -171,7 +167,6 @@ import org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.reservedstate.service.FileSettingsService; -import org.elasticsearch.rest.RestController; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; @@ -188,6 +183,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.LongCounter; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -204,17 +200,16 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; -import java.util.function.UnaryOperator; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -245,11 +240,26 @@ static NodeConstruction prepareConstruction( List closeables = new ArrayList<>(); try { NodeConstruction constructor = new NodeConstruction(closeables); + Settings settings = constructor.createEnvironment(initialEnvironment, serviceProvider); + ThreadPool threadPool = constructor.createThreadPool(settings); SettingsModule settingsModule = constructor.validateSettings(initialEnvironment.settings(), settings, threadPool); - constructor.construct(threadPool, settingsModule, serviceProvider, forbidPrivateIndexSettings); + SearchModule searchModule = constructor.createSearchModule(settingsModule.getSettings(), threadPool); + constructor.createClientAndRegistries(settingsModule.getSettings(), threadPool, searchModule); + + ScriptService scriptService = constructor.createScriptService(settingsModule, threadPool, serviceProvider); + + constructor.construct( + threadPool, + settingsModule, + searchModule, + scriptService, + constructor.createAnalysisRegistry(), + serviceProvider, + forbidPrivateIndexSettings + ); return 
constructor; } catch (IOException e) { @@ -268,6 +278,7 @@ static NodeConstruction prepareConstruction( private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(Node.class); private final List resourcesToClose; + private final ModulesBuilder modules = new ModulesBuilder(); /* * References for storing in a Node */ @@ -335,7 +346,7 @@ private Optional getSinglePlugin(Class pluginClass) { return getSinglePlugin(pluginsService.filterPlugins(pluginClass), pluginClass); } - private Optional getSinglePlugin(Stream plugins, Class pluginClass) { + private static Optional getSinglePlugin(Stream plugins, Class pluginClass) { var it = plugins.iterator(); if (it.hasNext() == false) { return Optional.empty(); @@ -345,7 +356,7 @@ private Optional getSinglePlugin(Stream plugins, Class pluginClass) List allPlugins = new ArrayList<>(); allPlugins.add(plugin); it.forEachRemaining(allPlugins::add); - throw new IllegalStateException("A single " + pluginClass.getName() + " was expected but got :" + allPlugins); + throw new IllegalStateException("A single " + pluginClass.getName() + " was expected but got " + allPlugins); } return Optional.of(plugin); } @@ -423,6 +434,7 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr ); pluginsService = serviceProvider.newPluginService(initialEnvironment, envSettings); + modules.bindToInstance(PluginsService.class, pluginsService); Settings settings = Node.mergePluginSettings(pluginsService.pluginMap(), envSettings); /* @@ -431,6 +443,7 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr */ environment = new Environment(settings, initialEnvironment.configFile()); Environment.assertEquivalent(initialEnvironment, environment); + modules.bindToInstance(Environment.class, environment); return settings; } @@ -441,6 +454,7 @@ private ThreadPool createThreadPool(Settings settings) throws IOException { pluginsService.flatMap(p -> p.getExecutorBuilders(settings)).toArray(ExecutorBuilder[]::new) ); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); + modules.bindToInstance(ThreadPool.class, threadPool); // adds the context to the DeprecationLogger so that it does not need to be injected everywhere HeaderWarning.setThreadContext(threadPool.getThreadContext()); @@ -457,13 +471,14 @@ private SettingsModule validateSettings(Settings envSettings, Settings settings, } SettingsExtension.load().forEach(e -> additionalSettings.addAll(e.getSettings())); - // this is as early as we can validate settings at this point. we already pass them to ScriptModule as well as ThreadPool + // this is as early as we can validate settings at this point. we already pass them to ThreadPool // so we might be late here already SettingsModule settingsModule = new SettingsModule( settings, additionalSettings, pluginsService.flatMap(Plugin::getSettingsFilter).toList() ); + modules.add(settingsModule); // creating `NodeEnvironment` breaks the ability to rollback to 7.x on an 8.0 upgrade (`upgradeLegacyNodeFolders`) so do this // after settings validation. 
@@ -479,91 +494,140 @@ private SettingsModule validateSettings(Settings envSettings, Settings settings, .collect(Collectors.toCollection(LinkedHashSet::new)) ); resourcesToClose.add(nodeEnvironment); + modules.bindToInstance(NodeEnvironment.class, nodeEnvironment); return settingsModule; } - private void construct( - ThreadPool threadPool, - SettingsModule settingsModule, - NodeServiceProvider serviceProvider, - boolean forbidPrivateIndexSettings - ) throws IOException { - - Settings settings = settingsModule.getSettings(); - - final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); - resourcesToClose.add(resourceWatcherService); + private SearchModule createSearchModule(Settings settings, ThreadPool threadPool) { + IndexSearcher.setMaxClauseCount(SearchUtils.calculateMaxClauseValue(threadPool)); + return new SearchModule(settings, pluginsService.filterPlugins(SearchPlugin.class).toList()); + } - final Set taskHeaders = Stream.concat( - pluginsService.filterPlugins(ActionPlugin.class).flatMap(p -> p.getTaskHeaders().stream()), - Task.HEADERS_TO_COPY.stream() - ).collect(Collectors.toSet()); + /** + * Create various objects that are stored as member variables. This is so they are accessible as soon as possible. + */ + private void createClientAndRegistries(Settings settings, ThreadPool threadPool, SearchModule searchModule) { + client = new NodeClient(settings, threadPool); + modules.add(b -> { + b.bind(Client.class).toInstance(client); + b.bind(NodeClient.class).toInstance(client); + }); - final TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) - .orElse(TelemetryProvider.NOOP); + localNodeFactory = new Node.LocalNodeFactory(settings, nodeEnvironment.nodeId()); - final Tracer tracer = telemetryProvider.getTracer(); + InferenceServiceRegistry inferenceServiceRegistry = new InferenceServiceRegistry( + pluginsService.filterPlugins(InferenceServicePlugin.class).toList(), + new InferenceServicePlugin.InferenceServiceFactoryContext(client) + ); + resourcesToClose.add(inferenceServiceRegistry); + modules.bindToInstance(InferenceServiceRegistry.class, inferenceServiceRegistry); - final TaskManager taskManager = new TaskManager(settings, threadPool, taskHeaders, tracer); + namedWriteableRegistry = new NamedWriteableRegistry( + Stream.of( + NetworkModule.getNamedWriteables().stream(), + IndicesModule.getNamedWriteables().stream(), + searchModule.getNamedWriteables().stream(), + pluginsService.flatMap(Plugin::getNamedWriteables), + ClusterModule.getNamedWriteables().stream(), + SystemIndexMigrationExecutor.getNamedWriteables().stream(), + inferenceServiceRegistry.getNamedWriteables().stream() + ).flatMap(Function.identity()).toList() + ); + xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + IndicesModule.getNamedXContents().stream(), + searchModule.getNamedXContents().stream(), + pluginsService.flatMap(Plugin::getNamedXContent), + ClusterModule.getNamedXWriteables().stream(), + SystemIndexMigrationExecutor.getNamedXContentParsers().stream(), + HealthNodeTaskExecutor.getNamedXContentParsers().stream() + ).flatMap(Function.identity()).toList() + ); + modules.add(b -> { + b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); + b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); + }); + } - client = new NodeClient(settings, threadPool); + private ScriptService createScriptService(SettingsModule 
settingsModule, ThreadPool threadPool, NodeServiceProvider serviceProvider) { + Settings settings = settingsModule.getSettings(); + ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class).toList()); - final ScriptModule scriptModule = new ScriptModule(settings, pluginsService.filterPlugins(ScriptPlugin.class).toList()); - final ScriptService scriptService = serviceProvider.newScriptService( + ScriptService scriptService = serviceProvider.newScriptService( pluginsService, settings, scriptModule.engines, scriptModule.contexts, threadPool::absoluteTimeInMillis ); - AnalysisModule analysisModule = new AnalysisModule( + ScriptModule.registerClusterSettingsListeners(scriptService, settingsModule.getClusterSettings()); + modules.add(b -> { + b.bind(ScriptService.class).toInstance(scriptService); + b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService)); + }); + + return scriptService; + } + + private AnalysisRegistry createAnalysisRegistry() throws IOException { + AnalysisRegistry registry = new AnalysisModule( environment, pluginsService.filterPlugins(AnalysisPlugin.class).toList(), pluginsService.getStablePluginRegistry() - ); - localNodeFactory = new Node.LocalNodeFactory(settings, nodeEnvironment.nodeId()); + ).getAnalysisRegistry(); + modules.bindToInstance(AnalysisRegistry.class, registry); + return registry; + } - ScriptModule.registerClusterSettingsListeners(scriptService, settingsModule.getClusterSettings()); - final NetworkService networkService = new NetworkService( - pluginsService.filterPlugins(DiscoveryPlugin.class) - .map(d -> d.getCustomNameResolver(environment.settings())) - .filter(Objects::nonNull) - .toList() + private void construct( + ThreadPool threadPool, + SettingsModule settingsModule, + SearchModule searchModule, + ScriptService scriptService, + AnalysisRegistry analysisRegistry, + NodeServiceProvider serviceProvider, + boolean forbidPrivateIndexSettings + ) throws IOException { + + Settings settings = settingsModule.getSettings(); + + TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) + .orElse(TelemetryProvider.NOOP); + modules.bindToInstance(Tracer.class, telemetryProvider.getTracer()); + + TaskManager taskManager = new TaskManager( + settings, + threadPool, + Stream.concat( + pluginsService.filterPlugins(ActionPlugin.class).flatMap(p -> p.getTaskHeaders().stream()), + Task.HEADERS_TO_COPY.stream() + ).collect(Collectors.toSet()), + telemetryProvider.getTracer() ); - List clusterPlugins = pluginsService.filterPlugins(ClusterPlugin.class).toList(); - final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool, taskManager); + ClusterService clusterService = createClusterService(settingsModule, threadPool, taskManager); clusterService.addStateApplier(scriptService); - resourcesToClose.add(clusterService); - - final Set> consistentSettings = settingsModule.getConsistentSettings(); - if (consistentSettings.isEmpty() == false) { - clusterService.addLocalNodeMasterListener( - new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() - ); - } Supplier documentParsingObserverSupplier = getDocumentParsingObserverSupplier(); - var factoryContext = new InferenceServicePlugin.InferenceServiceFactoryContext(client); - final InferenceServiceRegistry inferenceServiceRegistry = new InferenceServiceRegistry( - 
pluginsService.filterPlugins(InferenceServicePlugin.class).toList(), - factoryContext - ); - final IngestService ingestService = new IngestService( clusterService, threadPool, environment, scriptService, - analysisModule.getAnalysisRegistry(), + analysisRegistry, pluginsService.filterPlugins(IngestPlugin.class).toList(), client, IngestService.createGrokThreadWatchdog(environment, threadPool), documentParsingObserverSupplier ); + + SystemIndices systemIndices = createSystemIndices(settings); + final SetOnce repositoriesServiceReference = new SetOnce<>(); + final SetOnce rerouteServiceReference = new SetOnce<>(); final ClusterInfoService clusterInfoService = serviceProvider.newClusterInfoService( pluginsService, settings, @@ -571,162 +635,91 @@ private void construct( threadPool, client ); - final UsageService usageService = new UsageService(); - - SearchModule searchModule = new SearchModule(settings, pluginsService.filterPlugins(SearchPlugin.class).toList()); - IndexSearcher.setMaxClauseCount(SearchUtils.calculateMaxClauseValue(threadPool)); - final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( - Stream.of( - NetworkModule.getNamedWriteables().stream(), - IndicesModule.getNamedWriteables().stream(), - searchModule.getNamedWriteables().stream(), - pluginsService.flatMap(Plugin::getNamedWriteables), - ClusterModule.getNamedWriteables().stream(), - SystemIndexMigrationExecutor.getNamedWriteables().stream(), - inferenceServiceRegistry.getNamedWriteables().stream() - ).flatMap(Function.identity()).toList() - ); - NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( - Stream.of( - NetworkModule.getNamedXContents().stream(), - IndicesModule.getNamedXContents().stream(), - searchModule.getNamedXContents().stream(), - pluginsService.flatMap(Plugin::getNamedXContent), - ClusterModule.getNamedXWriteables().stream(), - SystemIndexMigrationExecutor.getNamedXContentParsers().stream(), - HealthNodeTaskExecutor.getNamedXContentParsers().stream() - ).flatMap(Function.identity()).toList() - ); - final List features = pluginsService.filterPlugins(SystemIndexPlugin.class).map(plugin -> { - SystemIndices.validateFeatureName(plugin.getFeatureName(), plugin.getClass().getCanonicalName()); - return SystemIndices.Feature.fromSystemIndexPlugin(plugin, settings); - }).toList(); - final SystemIndices systemIndices = new SystemIndices(features); - final ExecutorSelector executorSelector = systemIndices.getExecutorSelector(); - - ModulesBuilder modules = new ModulesBuilder(); - final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); - final FsHealthService fsHealthService = new FsHealthService( - settings, - clusterService.getClusterSettings(), - threadPool, - nodeEnvironment - ); - final SetOnce rerouteServiceReference = new SetOnce<>(); final InternalSnapshotsInfoService snapshotsInfoService = new InternalSnapshotsInfoService( settings, clusterService, repositoriesServiceReference::get, rerouteServiceReference::get ); - final WriteLoadForecaster writeLoadForecaster = getWriteLoadForecaster(threadPool, settings, clusterService.getClusterSettings()); final ClusterModule clusterModule = new ClusterModule( settings, clusterService, - clusterPlugins, + pluginsService.filterPlugins(ClusterPlugin.class).toList(), clusterInfoService, snapshotsInfoService, threadPool, systemIndices, - writeLoadForecaster + getWriteLoadForecaster(threadPool, settings, clusterService.getClusterSettings()), + telemetryProvider ); modules.add(clusterModule); + + 
RerouteService rerouteService = new BatchedRerouteService(clusterService, clusterModule.getAllocationService()::reroute); + rerouteServiceReference.set(rerouteService); + clusterService.setRerouteService(rerouteService); + + clusterInfoService.addListener( + new DiskThresholdMonitor( + settings, + clusterService::state, + clusterService.getClusterSettings(), + client, + threadPool::relativeTimeInMillis, + rerouteService + )::onNewInfo + ); + IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class).toList()); modules.add(indicesModule); - List pluginCircuitBreakers = pluginsService.filterPlugins(CircuitBreakerPlugin.class) - .map(plugin -> plugin.getCircuitBreaker(settings)) - .toList(); - final CircuitBreakerService circuitBreakerService = createCircuitBreakerService( + final Map customTripCounters = new TreeMap<>(); + CircuitBreakerService circuitBreakerService = createCircuitBreakerService( + new CircuitBreakerMetrics(telemetryProvider, customTripCounters), settingsModule.getSettings(), - pluginCircuitBreakers, settingsModule.getClusterSettings() ); - pluginsService.filterPlugins(CircuitBreakerPlugin.class).forEach(plugin -> { - CircuitBreaker breaker = circuitBreakerService.getBreaker(plugin.getCircuitBreaker(settings).getName()); - plugin.setCircuitBreaker(breaker); - }); - resourcesToClose.add(circuitBreakerService); modules.add(new GatewayModule()); CompatibilityVersions compatibilityVersions = new CompatibilityVersions( TransportVersion.current(), systemIndices.getMappingsVersions() ); + modules.add(loadPersistedClusterStateService(clusterService.getClusterSettings(), threadPool, compatibilityVersions)); + PageCacheRecycler pageCacheRecycler = serviceProvider.newPageCacheRecycler(pluginsService, settings); BigArrays bigArrays = serviceProvider.newBigArrays(pluginsService, pageCacheRecycler, circuitBreakerService); - modules.add(settingsModule); final MetaStateService metaStateService = new MetaStateService(nodeEnvironment, xContentRegistry); - final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService( - xContentRegistry, - clusterService.getClusterSettings(), - threadPool, - compatibilityVersions - ); - // collect engine factory providers from plugins - final Collection>> engineFactoryProviders = pluginsService.filterPlugins( - EnginePlugin.class - ).>>map(plugin -> plugin::getEngineFactory).toList(); - - final Map indexStoreFactories = pluginsService.filterPlugins(IndexStorePlugin.class) - .map(IndexStorePlugin::getDirectoryFactories) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - final Map recoveryStateFactories = pluginsService.filterPlugins( - IndexStorePlugin.class - ) - .map(IndexStorePlugin::getRecoveryStateFactories) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - final List indexFoldersDeletionListeners = pluginsService.filterPlugins( - IndexStorePlugin.class - ).map(IndexStorePlugin::getIndexFoldersDeletionListeners).flatMap(List::stream).toList(); - - final Map snapshotCommitSuppliers = pluginsService.filterPlugins( - IndexStorePlugin.class - ) - .map(IndexStorePlugin::getSnapshotCommitSuppliers) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); if (DiscoveryNode.isMasterNode(settings)) { 
clusterService.addListener(new SystemIndexMappingUpdateService(systemIndices, client)); - clusterService.addListener(new TransportVersionsFixupListener(clusterService, client.admin().cluster(), threadPool)); + clusterService.addListener( + new TransportVersionsFixupListener(clusterService, client.admin().cluster(), featureService, threadPool) + ); } - final RerouteService rerouteService = new BatchedRerouteService(clusterService, clusterModule.getAllocationService()::reroute); - rerouteServiceReference.set(rerouteService); - clusterService.setRerouteService(rerouteService); - - final IndicesService indicesService = new IndicesService( - settings, - pluginsService, - nodeEnvironment, - xContentRegistry, - analysisModule.getAnalysisRegistry(), - clusterModule.getIndexNameExpressionResolver(), - indicesModule.getMapperRegistry(), - namedWriteableRegistry, - threadPool, - settingsModule.getIndexScopedSettings(), - circuitBreakerService, - bigArrays, - scriptService, - clusterService, - client, - metaStateService, - engineFactoryProviders, - indexStoreFactories, - searchModule.getValuesSourceRegistry(), - recoveryStateFactories, - indexFoldersDeletionListeners, - snapshotCommitSuppliers, - searchModule.getRequestCacheKeyDifferentiator(), - documentParsingObserverSupplier - ); + IndicesService indicesService = new IndicesServiceBuilder().settings(settings) + .pluginsService(pluginsService) + .nodeEnvironment(nodeEnvironment) + .xContentRegistry(xContentRegistry) + .analysisRegistry(analysisRegistry) + .indexNameExpressionResolver(clusterModule.getIndexNameExpressionResolver()) + .mapperRegistry(indicesModule.getMapperRegistry()) + .namedWriteableRegistry(namedWriteableRegistry) + .threadPool(threadPool) + .indexScopedSettings(settingsModule.getIndexScopedSettings()) + .circuitBreakerService(circuitBreakerService) + .bigArrays(bigArrays) + .scriptService(scriptService) + .clusterService(clusterService) + .client(client) + .featureService(featureService) + .metaStateService(metaStateService) + .valuesSourceRegistry(searchModule.getValuesSourceRegistry()) + .requestCacheKeyDifferentiator(searchModule.getRequestCacheKeyDifferentiator()) + .documentParsingObserverSupplier(documentParsingObserverSupplier) + .build(); final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); IndexSettingProviders indexSettingProviders = new IndexSettingProviders( @@ -749,12 +742,11 @@ private void construct( indexSettingProviders ); - final MetadataCreateDataStreamService metadataCreateDataStreamService = new MetadataCreateDataStreamService( - threadPool, - clusterService, - metadataCreateIndexService + modules.bindToInstance( + MetadataCreateDataStreamService.class, + new MetadataCreateDataStreamService(threadPool, clusterService, metadataCreateIndexService) ); - final MetadataDataStreamsService metadataDataStreamsService = new MetadataDataStreamsService(clusterService, indicesService); + modules.bindToInstance(MetadataDataStreamsService.class, new MetadataDataStreamsService(clusterService, indicesService)); final MetadataUpdateSettingsService metadataUpdateSettingsService = new MetadataUpdateSettingsService( clusterService, @@ -765,8 +757,6 @@ private void construct( threadPool ); - FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); - record PluginServiceInstances( Client client, ClusterService clusterService, @@ -789,7 +779,7 @@ record PluginServiceInstances( client, clusterService, threadPool, 
- resourceWatcherService, + createResourceWatcherService(settings, threadPool), scriptService, xContentRegistry, environment, @@ -806,27 +796,6 @@ record PluginServiceInstances( Collection pluginComponents = pluginsService.flatMap(p -> p.createComponents(pluginServices)).toList(); - List> reservedStateHandlers = new ArrayList<>(); - - // add all reserved state handlers from server - reservedStateHandlers.add(new ReservedClusterSettingsAction(settingsModule.getClusterSettings())); - - var templateService = new MetadataIndexTemplateService( - clusterService, - metadataCreateIndexService, - indicesService, - settingsModule.getIndexScopedSettings(), - xContentRegistry, - systemIndices, - indexSettingProviders - ); - - reservedStateHandlers.add(new ReservedComposableIndexTemplateAction(templateService, settingsModule.getIndexScopedSettings())); - - // add all reserved state handlers from plugins - pluginsService.loadServiceProviders(ReservedClusterStateHandlerProvider.class) - .forEach(h -> reservedStateHandlers.addAll(h.handlers())); - var terminationHandlers = pluginsService.loadServiceProviders(TerminationHandlerProvider.class) .stream() .map(TerminationHandlerProvider::handler); @@ -842,16 +811,28 @@ record PluginServiceInstances( pluginsService.filterPlugins(ActionPlugin.class).toList(), client, circuitBreakerService, - usageService, + createUsageService(), systemIndices, - tracer, + telemetryProvider.getTracer(), clusterService, - reservedStateHandlers, + buildReservedStateHandlers( + settingsModule, + clusterService, + indicesService, + systemIndices, + indexSettingProviders, + metadataCreateIndexService + ), pluginsService.loadSingletonServiceProvider(RestExtension.class, RestExtension::allowAll) ); modules.add(actionModule); - final RestController restController = actionModule.getRestController(); + final NetworkService networkService = new NetworkService( + pluginsService.filterPlugins(DiscoveryPlugin.class) + .map(d -> d.getCustomNameResolver(environment.settings())) + .filter(Objects::nonNull) + .toList() + ); final NetworkModule networkModule = new NetworkModule( settings, pluginsService.filterPlugins(NetworkPlugin.class).toList(), @@ -862,15 +843,15 @@ record PluginServiceInstances( namedWriteableRegistry, xContentRegistry, networkService, - restController, + actionModule.getRestController(), actionModule::copyRequestHeadersToThreadContext, clusterService.getClusterSettings(), - tracer + telemetryProvider.getTracer() ); - Collection>> indexTemplateMetadataUpgraders = pluginsService.map( - Plugin::getIndexTemplateMetadataUpgrader - ).toList(); - final MetadataUpgrader metadataUpgrader = new MetadataUpgrader(indexTemplateMetadataUpgraders); + + var indexTemplateMetadataUpgraders = pluginsService.map(Plugin::getIndexTemplateMetadataUpgrader).toList(); + modules.bindToInstance(MetadataUpgrader.class, new MetadataUpgrader(indexTemplateMetadataUpgraders)); + final IndexMetadataVerifier indexMetadataVerifier = new IndexMetadataVerifier( settings, clusterService, @@ -893,9 +874,8 @@ record PluginServiceInstances( localNodeFactory, settingsModule.getClusterSettings(), taskManager, - tracer + telemetryProvider.getTracer() ); - final GatewayMetaState gatewayMetaState = new GatewayMetaState(); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService( transportService, @@ -956,40 +936,23 @@ record PluginServiceInstances( fileSettingsService, threadPool ); - final 
DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor( - settings, - clusterService::state, - clusterService.getClusterSettings(), - client, - threadPool::relativeTimeInMillis, - rerouteService - ); - clusterInfoService.addListener(diskThresholdMonitor::onNewInfo); - final DiscoveryModule discoveryModule = new DiscoveryModule( + DiscoveryModule discoveryModule = createDiscoveryModule( settings, + threadPool, transportService, - client, - namedWriteableRegistry, networkService, - clusterService.getMasterService(), - clusterService.getClusterApplierService(), - clusterService.getClusterSettings(), - pluginsService.filterPlugins(DiscoveryPlugin.class).toList(), - pluginsService.filterPlugins(ClusterCoordinationPlugin.class).toList(), + clusterService, clusterModule.getAllocationService(), - environment.configFile(), - gatewayMetaState, rerouteService, - fsHealthService, circuitBreakerService, compatibilityVersions, - featureService.getNodeFeatures() + featureService ); - this.nodeService = new NodeService( + nodeService = new NodeService( settings, threadPool, - monitorService, + new MonitorService(settings, nodeEnvironment, threadPool), discoveryModule.getCoordinator(), transportService, indicesService, @@ -1017,109 +980,66 @@ record PluginServiceInstances( searchModule.getFetchPhase(), responseCollectorService, circuitBreakerService, - executorSelector, - tracer + systemIndices.getExecutorSelector(), + telemetryProvider.getTracer() ); - final PersistentTasksService persistentTasksService = new PersistentTasksService(clusterService, threadPool, client); - final SystemIndexMigrationExecutor systemIndexMigrationExecutor = new SystemIndexMigrationExecutor( - client, - clusterService, - systemIndices, - metadataUpdateSettingsService, - metadataCreateIndexService, - settingsModule.getIndexScopedSettings() - ); - final HealthNodeTaskExecutor healthNodeTaskExecutor = HealthNodeTaskExecutor.create( - clusterService, - persistentTasksService, - featureService, - settings, - clusterService.getClusterSettings() - ); - final Stream> builtinTaskExecutors = Stream.of(systemIndexMigrationExecutor, healthNodeTaskExecutor); - final Stream> pluginTaskExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) - .map( - p -> p.getPersistentTasksExecutor( - clusterService, - threadPool, - client, - settingsModule, - clusterModule.getIndexNameExpressionResolver() - ) + modules.add( + loadPersistentTasksService( + settingsModule, + clusterService, + threadPool, + systemIndices, + featureService, + clusterModule.getIndexNameExpressionResolver(), + metadataUpdateSettingsService, + metadataCreateIndexService ) - .flatMap(List::stream); - final PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry( - Stream.concat(pluginTaskExecutors, builtinTaskExecutors).toList() - ); - final PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService( - settings, - registry, - clusterService, - threadPool ); - resourcesToClose.add(persistentTasksClusterService); - PluginShutdownService pluginShutdownService = new PluginShutdownService( - pluginsService.filterPlugins(ShutdownAwarePlugin.class).toList() + modules.add( + loadPluginShutdownService(clusterService), + loadDiagnosticServices(settings, discoveryModule.getCoordinator(), clusterService, transportService, featureService, threadPool) ); - clusterService.addListener(pluginShutdownService); - final RecoveryPlannerService recoveryPlannerService = getRecoveryPlannerService(threadPool, 
clusterService, repositoryService); - final DesiredNodesSettingsValidator desiredNodesSettingsValidator = new DesiredNodesSettingsValidator(); - - final MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); - final CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( - clusterService, - transportService, - discoveryModule.getCoordinator(), - masterHistoryService - ); - final HealthService healthService = createHealthService(clusterService, coordinationDiagnosticsService, threadPool); - HealthPeriodicLogger healthPeriodicLogger = createHealthPeriodicLogger(clusterService, settings, client, healthService); - healthPeriodicLogger.init(); - HealthMetadataService healthMetadataService = HealthMetadataService.create(clusterService, featureService, settings); - LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create( - settings, - clusterService, - nodeService, - threadPool, - client, - featureService - ); - HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService); - HealthApiStats healthApiStats = new HealthApiStats(); + RecoveryPlannerService recoveryPlannerService = getRecoveryPlannerService(threadPool, clusterService, repositoryService); + modules.add(b -> { + serviceProvider.processRecoverySettings(pluginsService, settingsModule.getClusterSettings(), recoverySettings); + SnapshotFilesProvider snapshotFilesProvider = new SnapshotFilesProvider(repositoryService); + var peerRecovery = new PeerRecoverySourceService( + transportService, + indicesService, + clusterService, + recoverySettings, + recoveryPlannerService + ); + resourcesToClose.add(peerRecovery); + b.bind(PeerRecoverySourceService.class).toInstance(peerRecovery); + b.bind(PeerRecoveryTargetService.class) + .toInstance( + new PeerRecoveryTargetService( + client, + threadPool, + transportService, + recoverySettings, + clusterService, + snapshotFilesProvider + ) + ); + }); - List reloadablePlugins = pluginsService.filterPlugins(ReloadablePlugin.class).toList(); - pluginsService.filterPlugins(ReloadAwarePlugin.class).forEach(p -> p.setReloadCallback(wrapPlugins(reloadablePlugins))); + modules.add(loadPluginComponents(pluginComponents)); modules.add(b -> { b.bind(NodeService.class).toInstance(nodeService); - b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); - b.bind(PluginsService.class).toInstance(pluginsService); - b.bind(Client.class).toInstance(client); - b.bind(NodeClient.class).toInstance(client); - b.bind(Environment.class).toInstance(environment); - b.bind(ThreadPool.class).toInstance(threadPool); - b.bind(NodeEnvironment.class).toInstance(nodeEnvironment); - b.bind(ResourceWatcherService.class).toInstance(resourceWatcherService); - b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService); b.bind(BigArrays.class).toInstance(bigArrays); b.bind(PageCacheRecycler.class).toInstance(pageCacheRecycler); - b.bind(ScriptService.class).toInstance(scriptService); - b.bind(AnalysisRegistry.class).toInstance(analysisModule.getAnalysisRegistry()); b.bind(IngestService.class).toInstance(ingestService); b.bind(IndexingPressure.class).toInstance(indexingLimits); - b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); - b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); - b.bind(MetadataUpgrader.class).toInstance(metadataUpgrader); 
b.bind(MetaStateService.class).toInstance(metaStateService); - b.bind(PersistedClusterStateService.class).toInstance(persistedClusterStateService); b.bind(IndicesService.class).toInstance(indicesService); b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); - b.bind(MetadataCreateDataStreamService.class).toInstance(metadataCreateDataStreamService); - b.bind(MetadataDataStreamsService.class).toInstance(metadataDataStreamsService); b.bind(MetadataUpdateSettingsService.class).toInstance(metadataUpdateSettingsService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); @@ -1127,99 +1047,143 @@ record PluginServiceInstances( b.bind(Transport.class).toInstance(transport); b.bind(TransportService.class).toInstance(transportService); b.bind(NetworkService.class).toInstance(networkService); - b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService)); b.bind(IndexMetadataVerifier.class).toInstance(indexMetadataVerifier); b.bind(ClusterInfoService.class).toInstance(clusterInfoService); b.bind(SnapshotsInfoService.class).toInstance(snapshotsInfoService); - b.bind(GatewayMetaState.class).toInstance(gatewayMetaState); b.bind(FeatureService.class).toInstance(featureService); - b.bind(Coordinator.class).toInstance(discoveryModule.getCoordinator()); - b.bind(Reconfigurator.class).toInstance(discoveryModule.getReconfigurator()); - { - serviceProvider.processRecoverySettings(pluginsService, settingsModule.getClusterSettings(), recoverySettings); - final SnapshotFilesProvider snapshotFilesProvider = new SnapshotFilesProvider(repositoryService); - b.bind(PeerRecoverySourceService.class) - .toInstance( - new PeerRecoverySourceService( - transportService, - indicesService, - clusterService, - recoverySettings, - recoveryPlannerService - ) - ); - b.bind(PeerRecoveryTargetService.class) - .toInstance( - new PeerRecoveryTargetService( - client, - threadPool, - transportService, - recoverySettings, - clusterService, - snapshotFilesProvider - ) - ); - } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); - pluginComponents.forEach(p -> { - if (p instanceof PluginComponentBinding pcb) { - @SuppressWarnings("unchecked") - Class clazz = (Class) pcb.inter(); - b.bind(clazz).toInstance(pcb.impl()); - - } else { - @SuppressWarnings("unchecked") - Class clazz = (Class) p.getClass(); - b.bind(clazz).toInstance(p); - } - }); - b.bind(PersistentTasksService.class).toInstance(persistentTasksService); - b.bind(PersistentTasksClusterService.class).toInstance(persistentTasksClusterService); - b.bind(PersistentTasksExecutorRegistry.class).toInstance(registry); b.bind(RepositoriesService.class).toInstance(repositoryService); b.bind(SnapshotsService.class).toInstance(snapshotsService); b.bind(SnapshotShardsService.class).toInstance(snapshotShardsService); b.bind(RestoreService.class).toInstance(restoreService); b.bind(RerouteService.class).toInstance(rerouteService); b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); - b.bind(FsHealthService.class).toInstance(fsHealthService); - b.bind(SystemIndices.class).toInstance(systemIndices); - b.bind(PluginShutdownService.class).toInstance(pluginShutdownService); - b.bind(ExecutorSelector.class).toInstance(executorSelector); b.bind(IndexSettingProviders.class).toInstance(indexSettingProviders); - b.bind(DesiredNodesSettingsValidator.class).toInstance(desiredNodesSettingsValidator); - b.bind(HealthService.class).toInstance(healthService); - 
b.bind(MasterHistoryService.class).toInstance(masterHistoryService); - b.bind(CoordinationDiagnosticsService.class).toInstance(coordinationDiagnosticsService); - b.bind(HealthNodeTaskExecutor.class).toInstance(healthNodeTaskExecutor); - b.bind(HealthMetadataService.class).toInstance(healthMetadataService); - b.bind(LocalHealthMonitor.class).toInstance(localHealthMonitor); - b.bind(HealthInfoCache.class).toInstance(nodeHealthOverview); - b.bind(HealthApiStats.class).toInstance(healthApiStats); - b.bind(Tracer.class).toInstance(tracer); b.bind(FileSettingsService.class).toInstance(fileSettingsService); - b.bind(WriteLoadForecaster.class).toInstance(writeLoadForecaster); - b.bind(HealthPeriodicLogger.class).toInstance(healthPeriodicLogger); b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); - b.bind(InferenceServiceRegistry.class).toInstance(inferenceServiceRegistry); }); if (ReadinessService.enabled(environment)) { - modules.add( - b -> b.bind(ReadinessService.class) - .toInstance(serviceProvider.newReadinessService(pluginsService, clusterService, environment)) + modules.bindToInstance( + ReadinessService.class, + serviceProvider.newReadinessService(pluginsService, clusterService, environment) ); } injector = modules.createInjector(); - // We allocate copies of existing shards by looking for a viable copy of the shard in the cluster and assigning the shard there. - // The search for viable copies is triggered by an allocation attempt (i.e. a reroute) and is performed asynchronously. When it - // completes we trigger another reroute to try the allocation again. This means there is a circular dependency: the allocation - // service needs access to the existing shards allocators (e.g. the GatewayAllocator) which need to be able to trigger a - // reroute, which needs to call into the allocation service. 
We close the loop here:
-        clusterModule.setExistingShardsAllocators(injector.getInstance(GatewayAllocator.class));
+        postInjection(clusterModule, actionModule, clusterService, transportService, featureService);
+    }
+
+    private ClusterService createClusterService(SettingsModule settingsModule, ThreadPool threadPool, TaskManager taskManager) {
+        ClusterService clusterService = new ClusterService(
+            settingsModule.getSettings(),
+            settingsModule.getClusterSettings(),
+            threadPool,
+            taskManager
+        );
+        resourcesToClose.add(clusterService);
+
+        Set<Setting<?>> consistentSettings = settingsModule.getConsistentSettings();
+        if (consistentSettings.isEmpty() == false) {
+            clusterService.addLocalNodeMasterListener(
+                new ConsistentSettingsService(settingsModule.getSettings(), clusterService, consistentSettings).newHashPublisher()
+            );
+        }
+        return clusterService;
+    }
+
+    private UsageService createUsageService() {
+        UsageService usageService = new UsageService();
+        modules.bindToInstance(UsageService.class, usageService);
+        return usageService;
+    }
+
+    private SystemIndices createSystemIndices(Settings settings) {
+        List<SystemIndices.Feature> features = pluginsService.filterPlugins(SystemIndexPlugin.class).map(plugin -> {
+            SystemIndices.validateFeatureName(plugin.getFeatureName(), plugin.getClass().getCanonicalName());
+            return SystemIndices.Feature.fromSystemIndexPlugin(plugin, settings);
+        }).toList();
+
+        SystemIndices systemIndices = new SystemIndices(features);
+        modules.add(b -> {
+            b.bind(SystemIndices.class).toInstance(systemIndices);
+            b.bind(ExecutorSelector.class).toInstance(systemIndices.getExecutorSelector());
+        });
+        return systemIndices;
+    }
+
+    private ResourceWatcherService createResourceWatcherService(Settings settings, ThreadPool threadPool) {
+        ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool);
+        resourcesToClose.add(resourceWatcherService);
+        modules.bindToInstance(ResourceWatcherService.class, resourceWatcherService);
+        return resourceWatcherService;
+    }
+
+    private Module loadPluginShutdownService(ClusterService clusterService) {
+        PluginShutdownService pluginShutdownService = new PluginShutdownService(
+            pluginsService.filterPlugins(ShutdownAwarePlugin.class).toList()
+        );
+        clusterService.addListener(pluginShutdownService);
+
+        return b -> b.bind(PluginShutdownService.class).toInstance(pluginShutdownService);
+    }
+
+    private Module loadDiagnosticServices(
+        Settings settings,
+        Coordinator coordinator,
+        ClusterService clusterService,
+        TransportService transportService,
+        FeatureService featureService,
+        ThreadPool threadPool
+    ) {
+
+        MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService);
+        CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService(
+            clusterService,
+            transportService,
+            coordinator,
+            masterHistoryService
+        );
+
+        var serverHealthIndicatorServices = Stream.of(
+            new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService),
+            new RepositoryIntegrityHealthIndicatorService(clusterService),
+            new DiskHealthIndicatorService(clusterService),
+            new ShardsCapacityHealthIndicatorService(clusterService)
+        );
+        var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class)
+            .flatMap(plugin -> plugin.getHealthIndicatorServices().stream());
+        HealthService healthService = new HealthService(
+            Stream.concat(serverHealthIndicatorServices, pluginHealthIndicatorServices).toList(),
+            threadPool
+        );
+        HealthPeriodicLogger healthPeriodicLogger = HealthPeriodicLogger.create(settings, clusterService, client, healthService);
+        HealthMetadataService healthMetadataService = HealthMetadataService.create(clusterService, featureService, settings);
+        LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create(
+            settings,
+            clusterService,
+            nodeService,
+            threadPool,
+            client,
+            featureService
+        );
+        HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService);
+
+        return b -> {
+            b.bind(HealthService.class).toInstance(healthService);
+            b.bind(MasterHistoryService.class).toInstance(masterHistoryService);
+            b.bind(CoordinationDiagnosticsService.class).toInstance(coordinationDiagnosticsService);
+            b.bind(HealthMetadataService.class).toInstance(healthMetadataService);
+            b.bind(LocalHealthMonitor.class).toInstance(localHealthMonitor);
+            b.bind(HealthInfoCache.class).toInstance(nodeHealthOverview);
+            b.bind(HealthApiStats.class).toInstance(new HealthApiStats());
+            b.bind(HealthPeriodicLogger.class).toInstance(healthPeriodicLogger);
+        };
+    }
+
+    private Module loadPluginComponents(Collection<?> pluginComponents) {
         List<LifecycleComponent> pluginLifecycleComponents = pluginComponents.stream().map(p -> {
             if (p instanceof PluginComponentBinding<?, ?> pcb) {
                 return pcb.impl();
@@ -1227,8 +1191,37 @@ record PluginServiceInstances(
             return p;
         }).filter(p -> p instanceof LifecycleComponent).map(p -> (LifecycleComponent) p).toList();
         resourcesToClose.addAll(pluginLifecycleComponents);
-        resourcesToClose.add(injector.getInstance(PeerRecoverySourceService.class));
-        this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents);
+        this.pluginLifecycleComponents = pluginLifecycleComponents;
+
+        List<ReloadablePlugin> reloadablePlugins = pluginsService.filterPlugins(ReloadablePlugin.class).toList();
+        pluginsService.filterPlugins(ReloadAwarePlugin.class).forEach(p -> p.setReloadCallback(wrapPlugins(reloadablePlugins)));
+
+        return b -> pluginComponents.forEach(p -> {
+            if (p instanceof PluginComponentBinding<?, ?> pcb) {
+                @SuppressWarnings("unchecked")
+                Class<Object> clazz = (Class<Object>) pcb.inter();
+                b.bind(clazz).toInstance(pcb.impl());
+            } else {
+                @SuppressWarnings("unchecked")
+                Class<Object> clazz = (Class<Object>) p.getClass();
+                b.bind(clazz).toInstance(p);
+            }
+        });
+    }
+
+    private void postInjection(
+        ClusterModule clusterModule,
+        ActionModule actionModule,
+        ClusterService clusterService,
+        TransportService transportService,
+        FeatureService featureService
+    ) {
+        // We allocate copies of existing shards by looking for a viable copy of the shard in the cluster and assigning the shard there.
+        // The search for viable copies is triggered by an allocation attempt (i.e. a reroute) and is performed asynchronously. When it
+        // completes we trigger another reroute to try the allocation again. This means there is a circular dependency: the allocation
+        // service needs access to the existing shards allocators (e.g. the GatewayAllocator) which need to be able to trigger a
+        // reroute, which needs to call into the allocation service. We close the loop here:
+        clusterModule.setExistingShardsAllocators(injector.getInstance(GatewayAllocator.class));
         // Due to Java's type erasure with generics, the injector can't give us exactly what we need, and we have
         // to resort to some evil casting.
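The comment in postInjection above names a real construction-order problem: the allocation service and the existing-shards allocators each need to reach the other at runtime, so neither can receive the other through its constructor. The fix is to construct both sides first and wire the back edge afterwards. A minimal sketch of that pattern in plain Java, using hypothetical stand-in types (ExistingShardsAllocatorSketch and AllocationServiceSketch are illustrations, not the real Elasticsearch classes):

    // Break a construction-time cycle by deferring one edge to a setter that is
    // called after both objects exist (the "close the loop" step above).
    interface ExistingShardsAllocatorSketch {
        void findViableCopiesAsync(Runnable onComplete); // async search, then callback
    }

    final class AllocationServiceSketch {
        private ExistingShardsAllocatorSketch existingShardsAllocator; // deliberately unset at construction

        void setExistingShardsAllocator(ExistingShardsAllocatorSketch allocator) {
            this.existingShardsAllocator = allocator; // wired late, once injection is done
        }

        void reroute() {
            // the allocator may trigger another reroute when its async search completes
            existingShardsAllocator.findViableCopiesAsync(this::reroute);
        }
    }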
@@ -1245,8 +1238,6 @@ record PluginServiceInstances( transportService.getRemoteClusterService(), namedWriteableRegistry ); - this.namedWriteableRegistry = namedWriteableRegistry; - this.xContentRegistry = xContentRegistry; logger.debug("initializing HTTP handlers ..."); actionModule.initRestHandlers(() -> clusterService.state().nodesIfRecovered(), f -> { @@ -1262,21 +1253,40 @@ private Supplier getDocumentParsingObserverSupplier() { } /** - * Creates a new {@link CircuitBreakerService} based on the settings provided. + * Create and initialize a new {@link CircuitBreakerService} based on the settings provided. * * @see Node#BREAKER_TYPE_KEY */ - private static CircuitBreakerService createCircuitBreakerService( + private CircuitBreakerService createCircuitBreakerService( + CircuitBreakerMetrics metrics, Settings settings, - List breakerSettings, ClusterSettings clusterSettings ) { + var pluginBreakers = pluginsService.filterPlugins(CircuitBreakerPlugin.class) + .map(p -> Tuple.tuple(p, p.getCircuitBreaker(settings))) + .toList(); + String type = Node.BREAKER_TYPE_KEY.get(settings); - return switch (type) { - case "hierarchy" -> new HierarchyCircuitBreakerService(settings, breakerSettings, clusterSettings); + CircuitBreakerService circuitBreakerService = switch (type) { + case "hierarchy" -> new HierarchyCircuitBreakerService( + metrics, + settings, + pluginBreakers.stream().map(Tuple::v2).toList(), + clusterSettings + ); case "none" -> new NoneCircuitBreakerService(); default -> throw new IllegalArgumentException("Unknown circuit breaker type [" + type + "]"); }; + resourcesToClose.add(circuitBreakerService); + modules.bindToInstance(CircuitBreakerService.class, circuitBreakerService); + + pluginBreakers.forEach(t -> { + final CircuitBreaker circuitBreaker = circuitBreakerService.getBreaker(t.v2().getName()); + t.v1().setCircuitBreaker(circuitBreaker); + metrics.addCustomCircuitBreaker(circuitBreaker); + }); + + return circuitBreakerService; } /** @@ -1296,31 +1306,6 @@ private static ReloadablePlugin wrapPlugins(List reloadablePlu }; } - private HealthService createHealthService( - ClusterService clusterService, - CoordinationDiagnosticsService coordinationDiagnosticsService, - ThreadPool threadPool - ) { - var serverHealthIndicatorServices = Stream.of( - new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), - new RepositoryIntegrityHealthIndicatorService(clusterService), - new DiskHealthIndicatorService(clusterService), - new ShardsCapacityHealthIndicatorService(clusterService) - ); - var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class) - .flatMap(plugin -> plugin.getHealthIndicatorServices().stream()); - return new HealthService(Stream.concat(serverHealthIndicatorServices, pluginHealthIndicatorServices).toList(), threadPool); - } - - private static HealthPeriodicLogger createHealthPeriodicLogger( - ClusterService clusterService, - Settings settings, - NodeClient client, - HealthService healthService - ) { - return new HealthPeriodicLogger(settings, clusterService, client, healthService); - } - private RecoveryPlannerService getRecoveryPlannerService( ThreadPool threadPool, ClusterService clusterService, @@ -1340,11 +1325,14 @@ private WriteLoadForecaster getWriteLoadForecaster(ThreadPool threadPool, Settin var writeLoadForecasters = pluginsService.filterPlugins(ClusterPlugin.class) .flatMap(clusterPlugin -> clusterPlugin.createWriteLoadForecasters(threadPool, settings, clusterSettings).stream()); - return 
getSinglePlugin(writeLoadForecasters, WriteLoadForecaster.class).orElse(WriteLoadForecaster.DEFAULT); + WriteLoadForecaster forecaster = getSinglePlugin(writeLoadForecasters, WriteLoadForecaster.class).orElse( + WriteLoadForecaster.DEFAULT + ); + modules.bindToInstance(WriteLoadForecaster.class, forecaster); + return forecaster; } - private PersistedClusterStateService newPersistedClusterStateService( - NamedXContentRegistry xContentRegistry, + private Module loadPersistedClusterStateService( ClusterSettings clusterSettings, ThreadPool threadPool, CompatibilityVersions compatibilityVersions @@ -1353,18 +1341,140 @@ private PersistedClusterStateService newPersistedClusterStateService( .map(ClusterCoordinationPlugin::getPersistedClusterStateServiceFactory) .flatMap(Optional::stream); - return getSinglePlugin(persistedClusterStateServiceFactories, ClusterCoordinationPlugin.PersistedClusterStateServiceFactory.class) - .map( - f -> f.newPersistedClusterStateService( - nodeEnvironment, - xContentRegistry, - clusterSettings, - threadPool, - compatibilityVersions - ) - ) + var service = getSinglePlugin( + persistedClusterStateServiceFactories, + ClusterCoordinationPlugin.PersistedClusterStateServiceFactory.class + ).map(f -> f.newPersistedClusterStateService(nodeEnvironment, xContentRegistry, clusterSettings, threadPool, compatibilityVersions)) .orElseGet( () -> new PersistedClusterStateService(nodeEnvironment, xContentRegistry, clusterSettings, threadPool::relativeTimeInMillis) ); + + return b -> b.bind(PersistedClusterStateService.class).toInstance(service); + } + + private List> buildReservedStateHandlers( + SettingsModule settingsModule, + ClusterService clusterService, + IndicesService indicesService, + SystemIndices systemIndices, + IndexSettingProviders indexSettingProviders, + MetadataCreateIndexService metadataCreateIndexService + ) { + List> reservedStateHandlers = new ArrayList<>(); + + // add all reserved state handlers from server + reservedStateHandlers.add(new ReservedClusterSettingsAction(settingsModule.getClusterSettings())); + + var templateService = new MetadataIndexTemplateService( + clusterService, + metadataCreateIndexService, + indicesService, + settingsModule.getIndexScopedSettings(), + xContentRegistry, + systemIndices, + indexSettingProviders + ); + reservedStateHandlers.add(new ReservedComposableIndexTemplateAction(templateService, settingsModule.getIndexScopedSettings())); + + // add all reserved state handlers from plugins + pluginsService.loadServiceProviders(ReservedClusterStateHandlerProvider.class) + .forEach(h -> reservedStateHandlers.addAll(h.handlers())); + + return reservedStateHandlers; + } + + private DiscoveryModule createDiscoveryModule( + Settings settings, + ThreadPool threadPool, + TransportService transportService, + NetworkService networkService, + ClusterService clusterService, + AllocationService allocationService, + RerouteService rerouteService, + CircuitBreakerService circuitBreakerService, + CompatibilityVersions compatibilityVersions, + FeatureService featureService + ) { + GatewayMetaState gatewayMetaState = new GatewayMetaState(); + FsHealthService fsHealthService = new FsHealthService(settings, clusterService.getClusterSettings(), threadPool, nodeEnvironment); + + DiscoveryModule module = new DiscoveryModule( + settings, + transportService, + client, + namedWriteableRegistry, + networkService, + clusterService.getMasterService(), + clusterService.getClusterApplierService(), + clusterService.getClusterSettings(), + 
pluginsService.filterPlugins(DiscoveryPlugin.class).toList(), + pluginsService.filterPlugins(ClusterCoordinationPlugin.class).toList(), + allocationService, + environment.configFile(), + gatewayMetaState, + rerouteService, + fsHealthService, + circuitBreakerService, + compatibilityVersions, + featureService + ); + + modules.add(module, b -> { + b.bind(GatewayMetaState.class).toInstance(gatewayMetaState); + b.bind(FsHealthService.class).toInstance(fsHealthService); + }); + + return module; + } + + private Module loadPersistentTasksService( + SettingsModule settingsModule, + ClusterService clusterService, + ThreadPool threadPool, + SystemIndices systemIndices, + FeatureService featureService, + IndexNameExpressionResolver indexNameExpressionResolver, + MetadataUpdateSettingsService metadataUpdateSettingsService, + MetadataCreateIndexService metadataCreateIndexService + ) { + PersistentTasksService persistentTasksService = new PersistentTasksService(clusterService, threadPool, client); + SystemIndexMigrationExecutor systemIndexMigrationExecutor = new SystemIndexMigrationExecutor( + client, + clusterService, + systemIndices, + metadataUpdateSettingsService, + metadataCreateIndexService, + settingsModule.getIndexScopedSettings() + ); + HealthNodeTaskExecutor healthNodeTaskExecutor = HealthNodeTaskExecutor.create( + clusterService, + persistentTasksService, + featureService, + settingsModule.getSettings(), + clusterService.getClusterSettings() + ); + Stream> builtinTaskExecutors = Stream.of(systemIndexMigrationExecutor, healthNodeTaskExecutor); + + Stream> pluginTaskExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) + .map(p -> p.getPersistentTasksExecutor(clusterService, threadPool, client, settingsModule, indexNameExpressionResolver)) + .flatMap(List::stream); + + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry( + Stream.concat(pluginTaskExecutors, builtinTaskExecutors).toList() + ); + PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService( + settingsModule.getSettings(), + registry, + clusterService, + threadPool + ); + resourcesToClose.add(persistentTasksClusterService); + + return b -> { + b.bind(PersistentTasksService.class).toInstance(persistentTasksService); + b.bind(HealthNodeTaskExecutor.class).toInstance(healthNodeTaskExecutor); + b.bind(PersistentTasksExecutorRegistry.class).toInstance(registry); + b.bind(PersistentTasksClusterService.class).toInstance(persistentTasksClusterService); + }; } } diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 9b6e55383eea0..e2283ea9851d7 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -118,7 +118,8 @@ public NodeInfo info( boolean indices ) { return new NodeInfo( - Version.CURRENT, + // TODO: revert to Build.current().version() when Kibana is updated + Version.CURRENT.toString(), TransportVersion.current(), IndexVersion.current(), findComponentVersions(), diff --git a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java index cb7652bdc7b03..7af206a12ecc9 100644 --- a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java @@ -11,10 +11,8 
@@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -113,16 +111,6 @@ public int hashCode() { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - CompletionPersistentTaskAction.Request, - PersistentTaskResponse, - CompletionPersistentTaskAction.RequestBuilder> { - - protected RequestBuilder(ElasticsearchClient client, CompletionPersistentTaskAction action) { - super(client, action, new Request()); - } - } - public static class TransportAction extends TransportMasterNodeAction { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java index 7fac04a63993e..8e0ee8f87422e 100644 --- a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java @@ -11,10 +11,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -83,22 +81,6 @@ public int hashCode() { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - RemovePersistentTaskAction.Request, - PersistentTaskResponse, - RemovePersistentTaskAction.RequestBuilder> { - - protected RequestBuilder(ElasticsearchClient client, RemovePersistentTaskAction action) { - super(client, action, new Request()); - } - - public final RequestBuilder setTaskId(String taskId) { - request.setTaskId(taskId); - return this; - } - - } - public static class TransportAction extends TransportMasterNodeAction { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java index c719eb318d571..d98abdffaf463 100644 --- a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java @@ -11,10 +11,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import 
org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -138,32 +136,6 @@ public void setParams(PersistentTaskParams params) { } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - StartPersistentTaskAction.Request, - PersistentTaskResponse, - StartPersistentTaskAction.RequestBuilder> { - - protected RequestBuilder(ElasticsearchClient client, StartPersistentTaskAction action) { - super(client, action, new Request()); - } - - public RequestBuilder setTaskId(String taskId) { - request.setTaskId(taskId); - return this; - } - - public RequestBuilder setAction(String action) { - request.setTaskName(action); - return this; - } - - public RequestBuilder setRequest(PersistentTaskParams params) { - request.setParams(params); - return this; - } - - } - public static class TransportAction extends TransportMasterNodeAction { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java index 6074cc0e4ea35..f961a9fffec27 100644 --- a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java @@ -11,10 +11,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -66,14 +64,26 @@ public void setTaskId(String taskId) { this.taskId = taskId; } + public String getTaskId() { + return taskId; + } + public void setAllocationId(long allocationId) { this.allocationId = allocationId; } + public long getAllocationId() { + return allocationId; + } + public void setState(PersistentTaskState state) { this.state = state; } + public PersistentTaskState getState() { + return state; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -110,26 +120,6 @@ public int hashCode() { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - UpdatePersistentTaskStatusAction.Request, - PersistentTaskResponse, - UpdatePersistentTaskStatusAction.RequestBuilder> { - - protected RequestBuilder(ElasticsearchClient client, UpdatePersistentTaskStatusAction action) { - super(client, action, new Request()); - } - - public final RequestBuilder setTaskId(String taskId) { - request.setTaskId(taskId); - return this; - } - - public final RequestBuilder setState(PersistentTaskState state) { - request.setState(state); - return this; - } - } - public static class TransportAction extends TransportMasterNodeAction { private final PersistentTasksClusterService 
persistentTasksClusterService; diff --git a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java index 774d47b583686..7f7a55762bf08 100644 --- a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java +++ b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java @@ -242,8 +242,11 @@ public void clusterChanged(ClusterChangedEvent event) { this.shuttingDown = shutdownNodeIds.contains(clusterState.nodes().getLocalNodeId()); if (shuttingDown) { - setReady(false); - logger.info("marking node as not ready because it's shutting down"); + // only disable the probe and log if the probe is running + if (ready()) { + setReady(false); + logger.info("marking node as not ready because it's shutting down"); + } } else { if (clusterState.nodes().getLocalNodeId().equals(clusterState.nodes().getMasterNodeId())) { setReady(fileSettingsApplied); diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java index 32c32369a5fae..630c0ea673c8b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java @@ -35,7 +35,7 @@ */ public final class RepositoriesModule { - public static final String METRIC_REQUESTS_COUNT = "repositories.requests.count"; + public static final String METRIC_REQUESTS_COUNT = "es.repositories.requests.count"; private final RepositoriesService repositoriesService; public RepositoriesModule( diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 4167717e09006..cd2b8c73fe90b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -70,14 +70,15 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.concurrent.ThrottledIterator; import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; @@ -176,6 +177,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected final ThreadPool threadPool; + public static final String STATELESS_SHARD_THREAD_NAME = "stateless_shard"; + public static final String STATELESS_TRANSLOG_THREAD_NAME = "stateless_translog"; + public static final String STATELESS_UPLOAD_THREAD_NAME = "stateless_upload"; + public static final String SNAPSHOT_PREFIX = "snap-"; public static final String INDEX_FILE_PREFIX = "index-"; @@ -455,6 +460,7 @@ protected void doStop() {} @Override protected void 
doClose() {
+        activityRefs.decRef();
         BlobStore store;
         // to close blobStore if blobStore initialization is started during close
         synchronized (lock) {
@@ -469,28 +475,14 @@ protected void doClose() {
         }
     }
 
-    // listeners to invoke when a restore completes and there are no more restores running
-    @Nullable
-    private List<ActionListener<Void>> emptyListeners;
+    private final SubscribableListener<Void> closedAndIdleListeners = new SubscribableListener<>();
 
-    // Set of shard ids that this repository is currently restoring
-    private final Set<ShardId> ongoingRestores = new HashSet<>();
+    private final RefCounted activityRefs = AbstractRefCounted.of(() -> closedAndIdleListeners.onResponse(null));
 
     @Override
     public void awaitIdle() {
-        assert lifecycle.stoppedOrClosed();
-        final PlainActionFuture<Void> future;
-        synchronized (ongoingRestores) {
-            if (ongoingRestores.isEmpty()) {
-                return;
-            }
-            future = new PlainActionFuture<>();
-            if (emptyListeners == null) {
-                emptyListeners = new ArrayList<>();
-            }
-            emptyListeners.add(future);
-        }
-        FutureUtils.get(future);
+        assert lifecycle.closed();
+        PlainActionFuture.get(closedAndIdleListeners::addListener);
     }
 
     @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here
@@ -1987,7 +1979,15 @@ public long getRestoreThrottleTimeInNanos() {
     }
 
     protected void assertSnapshotOrGenericThread() {
-        assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT, ThreadPool.Names.SNAPSHOT_META, ThreadPool.Names.GENERIC);
+        // The Stateless plugin adds custom thread pools for object store operations
+        assert ThreadPool.assertCurrentThreadPool(
+            ThreadPool.Names.SNAPSHOT,
+            ThreadPool.Names.SNAPSHOT_META,
+            ThreadPool.Names.GENERIC,
+            STATELESS_SHARD_THREAD_NAME,
+            STATELESS_TRANSLOG_THREAD_NAME,
+            STATELESS_UPLOAD_THREAD_NAME
+        );
     }
 
     @Override
@@ -3305,30 +3305,19 @@ public void restoreShard(
         );
         final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
         final BlobContainer container = shardContainer(indexId, snapshotShardId);
-        synchronized (ongoingRestores) {
-            if (store.isClosing()) {
-                restoreListener.onFailure(new AlreadyClosedException("store is closing"));
-                return;
-            }
-            if (lifecycle.started() == false) {
-                restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closed"));
-                return;
-            }
-            final boolean added = ongoingRestores.add(shardId);
-            assert added : "add restore for [" + shardId + "] that already has an existing restore";
+        if (store.isClosing()) {
+            restoreListener.onFailure(new AlreadyClosedException("store is closing"));
+            return;
         }
-        executor.execute(ActionRunnable.wrap(ActionListener.runBefore(restoreListener, () -> {
-            final List<ActionListener<Void>> onEmptyListeners;
-            synchronized (ongoingRestores) {
-                if (ongoingRestores.remove(shardId) && ongoingRestores.isEmpty() && emptyListeners != null) {
-                    onEmptyListeners = emptyListeners;
-                    emptyListeners = null;
-                } else {
-                    return;
-                }
-            }
-            ActionListener.onResponse(onEmptyListeners, null);
-        }), l -> {
+        if (lifecycle.started() == false) {
+            restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closed"));
+            return;
+        }
+        if (activityRefs.tryIncRef() == false) {
+            restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closing"));
+            return;
+        }
+        executor.execute(ActionRunnable.wrap(ActionListener.runBefore(restoreListener, activityRefs::decRef), l -> {
             final BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(container, snapshotId);
             final SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles(), null);
             new FileRestoreContext(metadata.name(), shardId, snapshotId, recoveryState) {
diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java
index 83676925a3ae7..56c975e148ab5 100644
--- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java
+++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java
@@ -113,7 +113,7 @@ protected boolean shouldRefreshFileState(ClusterState clusterState) {
      */
     @Override
     protected void processFileChanges() throws ExecutionException, InterruptedException, IOException {
-        PlainActionFuture<Void> completion = PlainActionFuture.newFuture();
+        PlainActionFuture<Void> completion = new PlainActionFuture<>();
         logger.info("processing path [{}] for [{}]", watchedFile(), NAMESPACE);
         try (
             var fis = Files.newInputStream(watchedFile());
diff --git a/server/src/main/java/org/elasticsearch/rest/ChunkedRestResponseBody.java b/server/src/main/java/org/elasticsearch/rest/ChunkedRestResponseBody.java
index 9cfe7b84577db..ae267573b4cab 100644
--- a/server/src/main/java/org/elasticsearch/rest/ChunkedRestResponseBody.java
+++ b/server/src/main/java/org/elasticsearch/rest/ChunkedRestResponseBody.java
@@ -20,6 +20,8 @@
 import org.elasticsearch.core.Releasables;
 import org.elasticsearch.core.RestApiVersion;
 import org.elasticsearch.core.Streams;
+import org.elasticsearch.logging.LogManager;
+import org.elasticsearch.logging.Logger;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -36,6 +38,8 @@
  */
 public interface ChunkedRestResponseBody extends Releasable {
+    Logger logger = LogManager.getLogger(ChunkedRestResponseBody.class);
+
     /**
      * @return true once this response has been written fully.
     */
@@ -126,6 +130,9 @@ public ReleasableBytesReference encodeChunk(int sizeHint, Recycler<BytesRef> rec
             );
             target = null;
             return result;
+        } catch (Exception e) {
+            logger.error("failure encoding chunk", e);
+            throw e;
         } finally {
             if (target != null) {
                 assert false : "failure encoding chunk";
@@ -212,6 +219,9 @@ public ReleasableBytesReference encodeChunk(int sizeHint, Recycler<BytesRef> rec
             );
             currentOutput = null;
             return result;
+        } catch (Exception e) {
+            logger.error("failure encoding text chunk", e);
+            throw e;
         } finally {
             if (currentOutput != null) {
                 assert false : "failure encoding text chunk";
diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java
index b51468edff63b..6a5d6f99df64b 100644
--- a/server/src/main/java/org/elasticsearch/rest/RestController.java
+++ b/server/src/main/java/org/elasticsearch/rest/RestController.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.common.path.PathTrie;
 import org.elasticsearch.common.recycler.Recycler;
 import org.elasticsearch.common.util.Maps;
+import org.elasticsearch.common.util.concurrent.RunOnce;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.RestApiVersion;
@@ -820,12 +821,12 @@ private void close() {
     private static class EncodedLengthTrackingChunkedRestResponseBody implements ChunkedRestResponseBody {
 
         private final ChunkedRestResponseBody delegate;
-        private final MethodHandlers methodHandlers;
+        private final RunOnce onCompletion;
         private long encodedLength = 0;
 
         private EncodedLengthTrackingChunkedRestResponseBody(ChunkedRestResponseBody delegate, MethodHandlers methodHandlers) {
             this.delegate = delegate;
-            this.methodHandlers = methodHandlers;
+            this.onCompletion = new RunOnce(() -> methodHandlers.addResponseStats(encodedLength));
         }
 
         @Override
@@ -837,6 +838,9 @@ public boolean isDone() {
         public ReleasableBytesReference encodeChunk(int sizeHint, Recycler<BytesRef> recycler) throws IOException {
             final ReleasableBytesReference bytesReference = delegate.encodeChunk(sizeHint, recycler);
             encodedLength += bytesReference.length();
+            if (isDone()) {
+                onCompletion.run();
+            }
             return bytesReference;
         }
 
@@ -848,7 +852,9 @@ public String getResponseContentTypeString() {
         @Override
         public void close() {
             delegate.close();
-            methodHandlers.addResponseStats(encodedLength);
+            // the client might close the connection before we send the last chunk, in which case we won't have recorded the response in the
+            // stats yet, so we do it now:
+            onCompletion.run();
         }
     }
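The RunOnce change above has to cope with two racing completion paths: the normal one, where the final chunk is encoded, and the early one, where the client closes the connection before the last chunk is sent. Wrapping the stats callback in RunOnce lets whichever path fires first record the response exactly once. A self-contained sketch of that idempotent-completion idea in plain Java (RunOnceSketch is a hypothetical stand-in for org.elasticsearch.common.util.concurrent.RunOnce, whose implementation is not shown in this diff):

    import java.util.concurrent.atomic.AtomicBoolean;

    // The wrapped action runs on the first call; every later call is a no-op, so
    // completion can be signalled safely from both the "last chunk encoded" path
    // and the "channel closed early" path.
    final class RunOnceSketch implements Runnable {
        private final AtomicBoolean done = new AtomicBoolean();
        private final Runnable delegate;

        RunOnceSketch(Runnable delegate) {
            this.delegate = delegate;
        }

        @Override
        public void run() {
            if (done.compareAndSet(false, true)) {
                delegate.run();
            }
        }
    }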
diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestChunkedToXContentListener.java b/server/src/main/java/org/elasticsearch/rest/action/RestChunkedToXContentListener.java
index a3bb1ed9d94dc..2edb042ea23e8 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/RestChunkedToXContentListener.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/RestChunkedToXContentListener.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.rest.action;
 
 import org.elasticsearch.common.xcontent.ChunkedToXContent;
+import org.elasticsearch.core.Releasable;
 import org.elasticsearch.rest.ChunkedRestResponseBody;
 import org.elasticsearch.rest.RestChannel;
 import org.elasticsearch.rest.RestResponse;
@@ -37,10 +38,17 @@ public RestChunkedToXContentListener(RestChannel channel, ToXContent.Params para
     @Override
     protected void processResponse(Response response) throws IOException {
         channel.sendResponse(
-            RestResponse.chunked(getRestStatus(response), ChunkedRestResponseBody.fromXContent(response, params, channel, null))
+            RestResponse.chunked(
+                getRestStatus(response),
+                ChunkedRestResponseBody.fromXContent(response, params, channel, releasableFromResponse(response))
+            )
         );
     }
 
+    protected Releasable releasableFromResponse(Response response) {
+        return null;
+    }
+
     protected RestStatus getRestStatus(Response response) {
         return RestStatus.OK;
     }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestRefCountedChunkedToXContentListener.java b/server/src/main/java/org/elasticsearch/rest/action/RestRefCountedChunkedToXContentListener.java
new file mode 100644
index 0000000000000..dfd9c40e0e107
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/rest/action/RestRefCountedChunkedToXContentListener.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.rest.action;
+
+import org.elasticsearch.common.xcontent.ChunkedToXContent;
+import org.elasticsearch.core.RefCounted;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Releasables;
+import org.elasticsearch.rest.RestChannel;
+
+/**
+ * Same as {@link RestChunkedToXContentListener} but decrements the ref count on the response it receives by one after serialization of the
+ * response.
+ */
+public class RestRefCountedChunkedToXContentListener<Response extends ChunkedToXContent & RefCounted> extends RestChunkedToXContentListener<
+    Response> {
+    public RestRefCountedChunkedToXContentListener(RestChannel channel) {
+        super(channel);
+    }
+
+    @Override
+    protected Releasable releasableFromResponse(Response response) {
+        response.mustIncRef();
+        return Releasables.assertOnce(response::decRef);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java
index 0191428e7ca82..fef7dc0cbdd37 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.action.NodeStatsLevel;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters;
 import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
 import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
 import org.elasticsearch.client.internal.node.NodeClient;
@@ -56,7 +57,7 @@ public List<Route> routes() {
     static {
         Map<String, Consumer<NodesStatsRequest>> map = new HashMap<>();
-        for (NodesStatsRequest.Metric metric : NodesStatsRequest.Metric.values()) {
+        for (NodesStatsRequestParameters.Metric metric : NodesStatsRequestParameters.Metric.values()) {
             map.put(metric.metricName(), request -> request.addMetric(metric.metricName()));
         }
         map.put("indices", request -> request.indices(true));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java
index ead9412334b84..630b8c9c40509 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.DeprecationCategory;
 import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.core.UpdateForV9;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.Scope;
@@ -43,6 +44,7 @@ public String getName() {
     }
 
     @Override
+    @UpdateForV9
     public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index")));
         closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout()));
@@ -55,12 +57,11 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
                     "close-index-wait_for_active_shards-index-setting",
                     "?wait_for_active_shards=index-setting is now the default behaviour; the 'index-setting' value for this parameter "
                         + "should no longer be used since it will become unsupported in version "
-                        + (Version.V_7_0_0.major + 2)
+                        + (Version.V_8_0_0.major + 1)
                 );
                 // TODO in v9:
                 // - throw an IllegalArgumentException here
-                // - record the removal of support for this value as a breaking change.
-                // - mention Version.V_8_0_0 in the code to ensure that we revisit this in v10
+                // - record the removal of support for this value as a breaking change
                 // TODO in v10:
                 // - remove the IllegalArgumentException here
             } else if (waitForActiveShards != null) {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
index a04e23f289379..4c9ac8fcb9a3c 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.action.support.ListenableActionFuture;
+import org.elasticsearch.action.support.SubscribableListener;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -65,9 +65,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         if (validationException != null) {
             throw validationException;
         }
-        final var responseFuture = new ListenableActionFuture<ForceMergeResponse>();
-        final var task = client.executeLocally(ForceMergeAction.INSTANCE, mergeRequest, responseFuture);
-        responseFuture.addListener(new LoggingTaskListener<>(task));
+        final var responseListener = new SubscribableListener<ForceMergeResponse>();
+        final var task = client.executeLocally(ForceMergeAction.INSTANCE, mergeRequest, responseListener);
+        responseListener.addListener(new LoggingTaskListener<>(task));
         return sendTask(client.getLocalNodeId(), task);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java
index b6e1240a3f85a..a8f6fa325b468 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -50,6 +51,7 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetAliasesAction extends BaseRestHandler { + @UpdateForV9 // reject the deprecated ?local parameter private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestGetAliasesAction.class); @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 2f7468ee544bb..74eddca033398 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -46,6 +46,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); + updateSettingsRequest.reopen(request.paramAsBoolean("reopen", false)); updateSettingsRequest.fromXContent(request.contentParser()); return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 2dc657582a0a1..5e9b2c8452579 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -63,8 +64,9 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli @Override public void processResponse(final ClusterStateResponse state) { NodesStatsRequest statsRequest = new NodesStatsRequest(nodes); + statsRequest.setIncludeShardsStats(false); statsRequest.clear() - .addMetric(NodesStatsRequest.Metric.FS.metricName()) + .addMetric(NodesStatsRequestParameters.Metric.FS.metricName()) .indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store)); client.admin().cluster().nodesStats(statsRequest, new RestResponseListener(channel) { diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java index 0afc010bd4b9d..cef831f06dfa1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestFielddataAction.java @@ -43,6 +43,7 @@ public String getName() { @Override protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true"); + nodesStatsRequest.setIncludeShardsStats(false); nodesStatsRequest.clear(); nodesStatsRequest.indices(true); String[] fields = request.paramAsStringArray("fields", null); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index e8395710ede03..39045a99aa4a2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -98,14 +99,15 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli ); final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); + nodesStatsRequest.setIncludeShardsStats(false); nodesStatsRequest.clear() .indices(true) .addMetrics( - NodesStatsRequest.Metric.JVM.metricName(), - NodesStatsRequest.Metric.OS.metricName(), - NodesStatsRequest.Metric.FS.metricName(), - NodesStatsRequest.Metric.PROCESS.metricName(), - NodesStatsRequest.Metric.SCRIPT.metricName() + NodesStatsRequestParameters.Metric.JVM.metricName(), + NodesStatsRequestParameters.Metric.OS.metricName(), + NodesStatsRequestParameters.Metric.FS.metricName(), + NodesStatsRequestParameters.Metric.PROCESS.metricName(), + NodesStatsRequestParameters.Metric.SCRIPT.metricName() ); nodesStatsRequest.indices().includeUnloadedSegments(request.paramAsBoolean("include_unloaded_segments", false)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index c94f40b83856e..9ca0dae8c8740 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -83,7 
+84,8 @@ public void processResponse(final ClusterStateResponse clusterStateResponse) { @Override public void processResponse(final NodesInfoResponse nodesInfoResponse) { NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); - nodesStatsRequest.clear().addMetric(NodesStatsRequest.Metric.THREAD_POOL.metricName()); + nodesStatsRequest.setIncludeShardsStats(false); + nodesStatsRequest.clear().addMetric(NodesStatsRequestParameters.Metric.THREAD_POOL.metricName()); client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java index e5c70fa4fe188..fed7d8606ba01 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java @@ -8,7 +8,6 @@ package org.elasticsearch.rest.action.document; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -110,10 +109,8 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException { assert request.params().get("id") == null : "non-null id: " + request.params().get("id"); - if (request.params().get("op_type") == null && nodesInCluster.get().getMinNodeVersion().onOrAfter(Version.V_7_5_0)) { - // default to op_type create - request.params().put("op_type", "create"); - } + // default to op_type create + request.params().putIfAbsent("op_type", "create"); return super.prepareRequest(request, client); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java b/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java index 7bcc19cb17fa9..4300293a1336e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java @@ -40,10 +40,10 @@ import java.util.function.Predicate; import java.util.stream.Collectors; -import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.HTTP; -import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.INGEST; -import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.SCRIPT; -import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.THREAD_POOL; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.HTTP; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.INGEST; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.SCRIPT; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.THREAD_POOL; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; @ServerlessScope(Scope.PUBLIC) @@ -86,6 +86,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { var nodesStatsRequest = new NodesStatsRequest().clear(); + 
nodesStatsRequest.setIncludeShardsStats(false); var targets = Strings.tokenizeByCommaToSet(request.param("target")); if (targets.size() == 1 && targets.contains("_all")) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java new file mode 100644 index 0000000000000..e0d9dd95206cf --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.rest.action.ingest; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.SimulateBulkAction; +import org.elasticsearch.action.bulk.SimulateBulkRequest; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * This is the REST endpoint for the simulate ingest API. This API executes all pipelines for a document (or documents) that would be + * executed if that document were sent to the given index. The JSON that would be indexed is returned to the user, along with the list of + * pipelines that were executed. The API allows the user to optionally send in substitute definitions for pipelines so that changes can be + * tried out without actually modifying the cluster state. 
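+ * <p>
+ * For illustration only, a request body might look like the following (an assumed example whose shape is inferred from the
+ * parsing code in this class, not an exhaustive specification of the API):
+ * <pre>
+ * { "docs": [ { "_index": "my-index", "_id": "1", "_source": { "foo": "bar" } } ],
+ *   "pipeline_substitutions": { "my-pipeline": { "processors": [ ... ] } } }
+ * </pre>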
+ */
+@ServerlessScope(Scope.PUBLIC)
+public class RestSimulateIngestAction extends BaseRestHandler {
+
+    @Override
+    public List<Route> routes() {
+        return List.of(
+            new Route(GET, "/_ingest/_simulate"),
+            new Route(POST, "/_ingest/_simulate"),
+            new Route(GET, "/_ingest/{index}/_simulate"),
+            new Route(POST, "/_ingest/{index}/_simulate")
+        );
+    }
+
+    @Override
+    public String getName() {
+        return "ingest_simulate_ingest_action";
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+        String defaultIndex = request.param("index");
+        FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
+        String defaultPipeline = request.param("pipeline");
+        Tuple<XContentType, BytesReference> sourceTuple = request.contentOrSourceParam();
+        Map<String, Object> sourceMap = XContentHelper.convertToMap(sourceTuple.v2(), false, sourceTuple.v1()).v2();
+        SimulateBulkRequest bulkRequest = new SimulateBulkRequest(
+            (Map<String, Map<String, Object>>) sourceMap.remove("pipeline_substitutions")
+        );
+        BytesReference transformedData = convertToBulkRequestXContentBytes(sourceMap);
+        bulkRequest.add(
+            transformedData,
+            defaultIndex,
+            null,
+            defaultFetchSourceContext,
+            defaultPipeline,
+            null,
+            true,
+            true,
+            request.getXContentType(),
+            request.getRestApiVersion()
+        );
+        return channel -> client.execute(SimulateBulkAction.INSTANCE, bulkRequest, new SimulateIngestRestToXContentListener(channel));
+    }
+
+    /*
+     * The simulate ingest API is intended to have inputs and outputs that are formatted similarly to the simulate pipeline API for the
+     * sake of consistency. But internally it uses the same code as the _bulk API, so that we have confidence that we are simulating what
+     * really happens on ingest. This method transforms simulate-style inputs into an input that the bulk API can accept.
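+     *
+     * For example (the field names and values here are purely illustrative), a simulate-style doc
+     * { "_index": "i", "_id": "1", "_source": { "f": "v" } }
+     * is rewritten as the two bulk-format lines
+     * { "index": { "_index": "i", "_id": "1" } } and { "f": "v" }.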
+     * Non-private for unit testing
+     */
+    static BytesReference convertToBulkRequestXContentBytes(Map<String, Object> sourceMap) throws IOException {
+        List<Map<String, Object>> docs = ConfigurationUtils.readList(null, null, sourceMap, "docs");
+        if (docs.isEmpty()) {
+            throw new IllegalArgumentException("must specify at least one document in [docs]");
+        }
+        ByteBuffer[] buffers = new ByteBuffer[2 * docs.size()];
+        int bufferCount = 0;
+        for (Map<String, Object> doc : docs) {
+            if ((doc != null) == false) {
+                throw new IllegalArgumentException("malformed [docs] section, should include an inner object");
+            }
+            Map<String, Object> document = ConfigurationUtils.readMap(null, null, doc, "_source");
+            String index = ConfigurationUtils.readOptionalStringProperty(null, null, doc, IngestDocument.Metadata.INDEX.getFieldName());
+            String id = ConfigurationUtils.readOptionalStringProperty(null, null, doc, IngestDocument.Metadata.ID.getFieldName());
+            XContentBuilder actionXContentBuilder = XContentFactory.contentBuilder(XContentType.JSON).lfAtEnd();
+            actionXContentBuilder.startObject().field("index").startObject();
+            if (index != null) {
+                actionXContentBuilder.field("_index", index);
+            }
+            if (id != null) {
+                actionXContentBuilder.field("_id", id);
+            }
+            actionXContentBuilder.endObject().endObject();
+            buffers[bufferCount++] = ByteBuffer.wrap(BytesReference.bytes(actionXContentBuilder).toBytesRef().bytes);
+            XContentBuilder dataXContentBuilder = XContentFactory.contentBuilder(XContentType.JSON).lfAtEnd();
+            dataXContentBuilder.startObject();
+            for (String key : document.keySet()) {
+                dataXContentBuilder.field(key, document.get(key));
+            }
+            dataXContentBuilder.endObject();
+            buffers[bufferCount++] = ByteBuffer.wrap(BytesReference.bytes(dataXContentBuilder).toBytesRef().bytes);
+        }
+        return BytesReference.fromByteBuffers(buffers);
+    }
+
+    /*
+     * The simulate ingest API is intended to have inputs and outputs that are formatted similarly to the simulate pipeline API for the
+     * sake of consistency. But internally it uses the same code as the _bulk API, so that we have confidence that we are simulating what
+     * really happens on ingest. This class is used in place of RestToXContentListener to transform the bulk-style response into
+     * simulate-style xcontent.
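+     *
+     * Each bulk item response is nested under a "doc" object inside a "docs" array, so a
+     * successful simulation looks roughly like (shape only, values illustrative):
+     * { "docs": [ { "doc": { "_index": "my-index", "_id": "1", ... } } ] }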
+     * Non-private for unit testing
+     */
+    static class SimulateIngestRestToXContentListener extends RestToXContentListener<BulkResponse> {
+
+        SimulateIngestRestToXContentListener(RestChannel channel) {
+            super(channel);
+        }
+
+        @Override
+        public RestResponse buildResponse(BulkResponse response, XContentBuilder builder) throws Exception {
+            assert response.isFragment() == false;
+            toXContent(response, builder, channel.request());
+            RestStatus restStatus = statusFunction.apply(response);
+            return new RestResponse(restStatus, builder);
+        }
+
+        private static void toXContent(BulkResponse response, XContentBuilder builder, ToXContent.Params params) throws IOException {
+            builder.startObject();
+            builder.startArray("docs");
+            for (BulkItemResponse item : response) {
+                builder.startObject();
+                builder.startObject("doc");
+                if (item.isFailed()) {
+                    builder.field("_id", item.getFailure().getId());
+                    builder.field("_index", item.getFailure().getIndex());
+                    builder.startObject("error");
+                    ElasticsearchException.generateThrowableXContent(builder, params, item.getFailure().getCause());
+                    builder.endObject();
+                } else {
+                    item.getResponse().innerToXContent(builder, params);
+                }
+                builder.endObject();
+                builder.endObject();
+            }
+            builder.endArray();
+            builder.endObject();
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
index c9bcaf6b5ff4d..c232e1a30c553 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
@@ -8,9 +8,9 @@ package org.elasticsearch.rest.action.search;
-import org.elasticsearch.action.search.MultiSearchAction;
 import org.elasticsearch.action.search.MultiSearchRequest;
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.TransportMultiSearchAction;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.CheckedBiConsumer;
@@ -26,7 +26,7 @@ import org.elasticsearch.rest.Scope;
 import org.elasticsearch.rest.ServerlessScope;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.usage.SearchUsageHolder;
 import org.elasticsearch.xcontent.XContent;
@@ -82,7 +82,11 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         );
         return channel -> {
             final RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel());
-            cancellableClient.execute(MultiSearchAction.INSTANCE, multiSearchRequest, new RestChunkedToXContentListener<>(channel));
+            cancellableClient.execute(
+                TransportMultiSearchAction.TYPE,
+                multiSearchRequest,
+                new RestRefCountedChunkedToXContentListener<>(channel)
+            );
         };
     }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index 9a5aef4996209..41102a3568e30 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -11,9 +11,9 @@ import
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchContextId; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; @@ -29,7 +29,7 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -121,7 +121,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(SearchAction.INSTANCE, searchRequest, new RestChunkedToXContentListener<>(channel)); + cancelClient.execute(TransportSearchAction.TYPE, searchRequest, new RestRefCountedChunkedToXContentListener<>(channel)); }; } diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 4c1df376ebf63..a8721503c7454 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -8,6 +8,8 @@ package org.elasticsearch.search; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -27,7 +29,12 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.IdLoader; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -37,6 +44,7 @@ import org.elasticsearch.index.search.NestedHelper; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.search.aggregations.SearchContextAggregations; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchPhase; @@ -68,7 +76,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Executor; +import java.util.concurrent.ThreadPoolExecutor; import java.util.function.LongSupplier; +import java.util.function.ToLongFunction; final class DefaultSearchContext extends SearchContext { @@ -123,7 +133,6 @@ final class DefaultSearchContext extends 
SearchContext { private Query query; private ParsedQuery postFilter; private Query aliasFilter; - private int[] docIdsToLoad; private SearchContextAggregations aggregations; private SearchHighlightContext highlight; private SuggestionSearchContext suggest; @@ -143,57 +152,148 @@ final class DefaultSearchContext extends SearchContext { FetchPhase fetchPhase, boolean lowLevelCancellation, Executor executor, - int maximumNumberOfSlices, + SearchService.ResultsType resultsType, + boolean enableQueryPhaseParallelCollection, int minimumDocsPerSlice ) throws IOException { this.readerContext = readerContext; this.request = request; this.fetchPhase = fetchPhase; - this.searchType = request.searchType(); - this.shardTarget = shardTarget; - this.indexService = readerContext.indexService(); - this.indexShard = readerContext.indexShard(); - - Engine.Searcher engineSearcher = readerContext.acquireSearcher("search"); - if (executor == null) { - this.searcher = new ContextIndexSearcher( - engineSearcher.getIndexReader(), - engineSearcher.getSimilarity(), - engineSearcher.getQueryCache(), - engineSearcher.getQueryCachingPolicy(), - lowLevelCancellation - ); - } else { - this.searcher = new ContextIndexSearcher( - engineSearcher.getIndexReader(), - engineSearcher.getSimilarity(), - engineSearcher.getQueryCache(), - engineSearcher.getQueryCachingPolicy(), - lowLevelCancellation, - executor, - maximumNumberOfSlices, - minimumDocsPerSlice + boolean success = false; + try { + this.searchType = request.searchType(); + this.shardTarget = shardTarget; + this.indexService = readerContext.indexService(); + this.indexShard = readerContext.indexShard(); + + Engine.Searcher engineSearcher = readerContext.acquireSearcher("search"); + int maximumNumberOfSlices; + if (hasSyntheticSource(indexService)) { + // accessing synthetic source is not thread safe + maximumNumberOfSlices = 1; + } else { + maximumNumberOfSlices = determineMaximumNumberOfSlices( + executor, + request, + resultsType, + enableQueryPhaseParallelCollection, + field -> getFieldCardinality(field, readerContext.indexService(), engineSearcher.getDirectoryReader()) + ); + + } + if (executor == null) { + this.searcher = new ContextIndexSearcher( + engineSearcher.getIndexReader(), + engineSearcher.getSimilarity(), + engineSearcher.getQueryCache(), + engineSearcher.getQueryCachingPolicy(), + lowLevelCancellation + ); + } else { + this.searcher = new ContextIndexSearcher( + engineSearcher.getIndexReader(), + engineSearcher.getSimilarity(), + engineSearcher.getQueryCache(), + engineSearcher.getQueryCachingPolicy(), + lowLevelCancellation, + executor, + maximumNumberOfSlices, + minimumDocsPerSlice + ); + } + releasables.addAll(List.of(engineSearcher, searcher)); + this.relativeTimeSupplier = relativeTimeSupplier; + this.timeout = timeout; + searchExecutionContext = indexService.newSearchExecutionContext( + request.shardId().id(), + request.shardRequestIndex(), + searcher, + request::nowInMillis, + shardTarget.getClusterAlias(), + request.getRuntimeMappings() ); + queryBoost = request.indexBoost(); + this.lowLevelCancellation = lowLevelCancellation; + success = true; + } finally { + if (success == false) { + close(); + } } - releasables.addAll(List.of(engineSearcher, searcher)); + } - this.relativeTimeSupplier = relativeTimeSupplier; - this.timeout = timeout; - searchExecutionContext = indexService.newSearchExecutionContext( - request.shardId().id(), - request.shardRequestIndex(), - searcher, - request::nowInMillis, - shardTarget.getClusterAlias(), - 
request.getRuntimeMappings() - ); - queryBoost = request.indexBoost(); - this.lowLevelCancellation = lowLevelCancellation; + private static boolean hasSyntheticSource(IndexService indexService) { + DocumentMapper documentMapper = indexService.mapperService().documentMapper(); + if (documentMapper != null) { + return documentMapper.sourceMapper().isSynthetic(); + } + return false; + } + + static long getFieldCardinality(String field, IndexService indexService, DirectoryReader directoryReader) { + MappedFieldType mappedFieldType = indexService.mapperService().fieldType(field); + if (mappedFieldType == null) { + return -1; + } + IndexFieldData indexFieldData; + try { + indexFieldData = indexService.loadFielddata(mappedFieldType, FieldDataContext.noRuntimeFields("field cardinality")); + } catch (Exception e) { + // loading fielddata for runtime fields will fail, that's ok + return -1; + } + return getFieldCardinality(indexFieldData, directoryReader); + } + + static long getFieldCardinality(IndexFieldData indexFieldData, DirectoryReader directoryReader) { + if (indexFieldData instanceof IndexOrdinalsFieldData indexOrdinalsFieldData) { + if (indexOrdinalsFieldData.supportsGlobalOrdinalsMapping()) { + IndexOrdinalsFieldData global = indexOrdinalsFieldData.loadGlobal(directoryReader); + OrdinalMap ordinalMap = global.getOrdinalMap(); + if (ordinalMap != null) { + return ordinalMap.getValueCount(); + } + if (directoryReader.leaves().size() == 0) { + return 0; + } + return global.load(directoryReader.leaves().get(0)).getOrdinalsValues().getValueCount(); + } + } + return -1L; + } + + static int determineMaximumNumberOfSlices( + Executor executor, + ShardSearchRequest request, + SearchService.ResultsType resultsType, + boolean enableQueryPhaseParallelCollection, + ToLongFunction fieldCardinality + ) { + return executor instanceof ThreadPoolExecutor tpe + && isParallelCollectionSupportedForResults(resultsType, request.source(), fieldCardinality, enableQueryPhaseParallelCollection) + ? 
tpe.getMaximumPoolSize() + : 1; + } + + static boolean isParallelCollectionSupportedForResults( + SearchService.ResultsType resultsType, + SearchSourceBuilder source, + ToLongFunction fieldCardinality, + boolean isQueryPhaseParallelismEnabled + ) { + if (resultsType == SearchService.ResultsType.DFS) { + return true; + } + if (resultsType == SearchService.ResultsType.QUERY && isQueryPhaseParallelismEnabled) { + return source == null || source.supportsParallelCollection(fieldCardinality); + } + return false; } @Override public void addFetchResult() { this.fetchResult = new FetchSearchResult(this.readerContext.id(), this.shardTarget); + addReleasable(fetchResult::decRef); } @Override @@ -468,11 +568,6 @@ public boolean sourceRequested() { return fetchSourceContext != null && fetchSourceContext.fetchSource(); } - @Override - public boolean hasFetchSourceContext() { - return fetchSourceContext != null; - } - @Override public FetchSourceContext fetchSourceContext() { return this.fetchSourceContext; @@ -724,17 +819,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { this.seqAndPrimaryTerm = seqNoAndPrimaryTerm; } - @Override - public int[] docIdsToLoad() { - return docIdsToLoad; - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - this.docIdsToLoad = docIdsToLoad; - return this; - } - @Override public DfsSearchResult dfsResult() { return dfsResult; diff --git a/server/src/main/java/org/elasticsearch/search/MultiValueMode.java b/server/src/main/java/org/elasticsearch/search/MultiValueMode.java index 9137d5c97248d..ad314a97a3a67 100644 --- a/server/src/main/java/org/elasticsearch/search/MultiValueMode.java +++ b/server/src/main/java/org/elasticsearch/search/MultiValueMode.java @@ -543,7 +543,7 @@ public int docID() { } @Override - public long longValue() throws IOException { + public long longValue() { return value; } }; @@ -571,7 +571,6 @@ public NumericDocValues select( final long missingValue, final BitSet parentDocs, final DocIdSetIterator childDocs, - int maxDoc, int maxChildren ) throws IOException { if (parentDocs == null || childDocs == null) { @@ -654,7 +653,7 @@ public boolean advanceExact(int target) throws IOException { } @Override - public double doubleValue() throws IOException { + public double doubleValue() { return this.value; } }; @@ -682,7 +681,6 @@ public NumericDoubleValues select( final double missingValue, final BitSet parentDocs, final DocIdSetIterator childDocs, - int maxDoc, int maxChildren ) throws IOException { if (parentDocs == null || childDocs == null) { @@ -804,7 +802,6 @@ public BinaryDocValues select( final BytesRef missingValue, final BitSet parentDocs, final DocIdSetIterator childDocs, - int maxDoc, int maxChildren ) throws IOException { if (parentDocs == null || childDocs == null) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 9fa99bb4a773f..7e1699307c5ee 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -51,7 +51,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -72,7 +71,7 @@ * * @see SearchHits */ -public final class SearchHit implements Writeable, ToXContentObject, Iterable { +public final class SearchHit implements Writeable, ToXContentObject { private final transient int docId; @@ 
-156,18 +155,8 @@ public SearchHit(StreamInput in) throws IOException { if (in.readBoolean()) { explanation = readExplanation(in); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { - documentFields.putAll(in.readMap(DocumentField::new)); - metaFields.putAll(in.readMap(DocumentField::new)); - } else { - Map fields = readFields(in); - fields.forEach( - (fieldName, docField) -> (MapperService.isMetadataFieldStatic(fieldName) ? metaFields : documentFields).put( - fieldName, - docField - ) - ); - } + documentFields.putAll(in.readMap(DocumentField::new)); + metaFields.putAll(in.readMap(DocumentField::new)); int size = in.readVInt(); if (size == 0) { @@ -213,33 +202,6 @@ public SearchHit(StreamInput in) throws IOException { private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME); - private static Map readFields(StreamInput in) throws IOException { - Map fields; - int size = in.readVInt(); - if (size == 0) { - fields = emptyMap(); - } else if (size == 1) { - DocumentField hitField = new DocumentField(in); - fields = singletonMap(hitField.getName(), hitField); - } else { - fields = Maps.newMapWithExpectedSize(size); - for (int i = 0; i < size; i++) { - DocumentField field = new DocumentField(in); - fields.put(field.getName(), field); - } - fields = unmodifiableMap(fields); - } - return fields; - } - - private static void writeFields(StreamOutput out, Map fields) throws IOException { - if (fields == null) { - out.writeVInt(0); - } else { - out.writeCollection(fields.values()); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeFloat(score); @@ -263,12 +225,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); writeExplanation(out, explanation); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { - out.writeMap(documentFields, StreamOutput::writeWriteable); - out.writeMap(metaFields, StreamOutput::writeWriteable); - } else { - writeFields(out, this.getFields()); - } + out.writeMap(documentFields, StreamOutput::writeWriteable); + out.writeMap(metaFields, StreamOutput::writeWriteable); if (highlightFields == null) { out.writeVInt(0); } else { @@ -429,13 +387,6 @@ public Map getSourceAsMap() { return sourceAsMap; } - @Override - public Iterator iterator() { - // need to join the fields and metadata fields - Map allFields = this.getFields(); - return allFields.values().iterator(); - } - /** * The hit field matching the given field name. 
*/ @@ -1001,7 +952,7 @@ private static Map parseHighlightFields(XContentParser p Map highlightFields = new HashMap<>(); while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { HighlightField highlightField = HighlightField.fromXContent(parser); - highlightFields.put(highlightField.getName(), highlightField); + highlightFields.put(highlightField.name(), highlightField); } return highlightFields; } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 44a8f641fae91..548e3fea9d91c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; @@ -134,7 +135,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -544,7 +544,7 @@ public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, })); } - private void ensureAfterSeqNoRefreshed( + private void ensureAfterSeqNoRefreshed( IndexShard shard, ShardSearchRequest request, CheckedSupplier executable, @@ -648,8 +648,12 @@ private IndexShard getShard(ShardSearchRequest request) { return indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); } - private static void runAsync(Executor executor, CheckedSupplier executable, ActionListener listener) { - executor.execute(ActionRunnable.supply(listener, executable)); + private static void runAsync( + Executor executor, + CheckedSupplier executable, + ActionListener listener + ) { + executor.execute(ActionRunnable.supplyAndDecRef(listener, executable)); } /** @@ -686,6 +690,7 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchSh final RescoreDocIds rescoreDocIds = context.rescoreDocIds(); context.queryResult().setRescoreDocIds(rescoreDocIds); readerContext.setRescoreDocIds(rescoreDocIds); + // inc-ref query result because we close the SearchContext that references it in this try-with-resources block context.queryResult().incRef(); return context.queryResult(); } @@ -707,15 +712,14 @@ private QueryFetchSearchResult executeFetchPhase(ReaderContext reader, SearchCon Releasable scope = tracer.withScope(SpanId.forTask(context.getTask())); SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, afterQueryTime) ) { - shortcutDocIdsToLoad(context); - fetchPhase.execute(context); + fetchPhase.execute(context, shortcutDocIdsToLoad(context)); if (reader.singleSession()) { freeReaderContext(reader.id()); } executor.success(); } // This will incRef the QuerySearchResult when it gets created - return new QueryFetchSearchResult(context.queryResult(), context.fetchResult()); + return QueryFetchSearchResult.of(context.queryResult(), context.fetchResult()); } public void executeQueryPhase( @@ -739,7 +743,7 @@ public void executeQueryPhase( SearchOperationListenerExecutor executor = 
new SearchOperationListenerExecutor(searchContext) ) { searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(null)); - processScroll(request, readerContext, searchContext); + processScroll(request, searchContext); QueryPhase.execute(searchContext); executor.success(); readerContext.setRescoreDocIds(searchContext.rescoreDocIds()); @@ -772,7 +776,8 @@ public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, ) { searchContext.searcher().setAggregatedDfs(request.dfs()); QueryPhase.execute(searchContext); - if (searchContext.queryResult().hasSearchContext() == false && readerContext.singleSession()) { + final QuerySearchResult queryResult = searchContext.queryResult(); + if (queryResult.hasSearchContext() == false && readerContext.singleSession()) { // no hits, we can release the context since there will be no fetch phase freeReaderContext(readerContext.id()); } @@ -781,10 +786,11 @@ public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, // and receive them back in the fetch phase. // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. final RescoreDocIds rescoreDocIds = searchContext.rescoreDocIds(); - searchContext.queryResult().setRescoreDocIds(rescoreDocIds); + queryResult.setRescoreDocIds(rescoreDocIds); readerContext.setRescoreDocIds(rescoreDocIds); - searchContext.queryResult().incRef(); - return searchContext.queryResult(); + // inc-ref query result because we close the SearchContext that references it in this try-with-resources block + queryResult.incRef(); + return queryResult; } catch (Exception e) { assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); logger.trace("Query phase failed", e); @@ -830,7 +836,7 @@ public void executeFetchPhase( ) { searchContext.assignRescoreDocIds(readerContext.getRescoreDocIds(null)); searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(null)); - processScroll(request, readerContext, searchContext); + processScroll(request, searchContext); searchContext.addQueryResult(); QueryPhase.execute(searchContext); final long afterQueryTime = executor.success(); @@ -856,17 +862,19 @@ public void executeFetchPhase(ShardFetchRequest request, SearchShardTask task, A } searchContext.assignRescoreDocIds(readerContext.getRescoreDocIds(request.getRescoreDocIds())); searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(request.getAggregatedDfs())); - searchContext.docIdsToLoad(request.docIds()); try ( SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext, true, System.nanoTime()) ) { - fetchPhase.execute(searchContext); + fetchPhase.execute(searchContext, request.docIds()); if (readerContext.singleSession()) { freeReaderContext(request.contextId()); } executor.success(); } - return searchContext.fetchResult(); + var fetchResult = searchContext.fetchResult(); + // inc-ref fetch result because we close the SearchContext that references it in this try-with-resources block + fetchResult.incRef(); + return fetchResult; } catch (Exception e) { assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); // we handle the failure in the failure listener below @@ -1078,7 +1086,6 @@ private DefaultSearchContext createSearchContext( request.getClusterAlias() ); ExecutorService executor = this.enableSearchWorkerThreads ? 
threadPool.executor(Names.SEARCH_WORKER) : null; - int maximumNumberOfSlices = determineMaximumNumberOfSlices(executor, request, resultsType); searchContext = new DefaultSearchContext( reader, request, @@ -1088,7 +1095,8 @@ private DefaultSearchContext createSearchContext( fetchPhase, lowLevelCancellation, executor, - maximumNumberOfSlices, + resultsType, + enableQueryPhaseParallelCollection, minimumDocsPerSlice ); // we clone the query shard context here just for rewriting otherwise we @@ -1109,27 +1117,6 @@ private DefaultSearchContext createSearchContext( return searchContext; } - int determineMaximumNumberOfSlices(ExecutorService executor, ShardSearchRequest request, ResultsType resultsType) { - return executor instanceof ThreadPoolExecutor tpe - && isParallelCollectionSupportedForResults(resultsType, request.source(), this.enableQueryPhaseParallelCollection) - ? tpe.getMaximumPoolSize() - : 1; - } - - static boolean isParallelCollectionSupportedForResults( - ResultsType resultsType, - SearchSourceBuilder source, - boolean isQueryPhaseParallelismEnabled - ) { - if (resultsType == ResultsType.DFS) { - return true; - } - if (resultsType == ResultsType.QUERY && isQueryPhaseParallelismEnabled) { - return source == null || source.supportsParallelCollection(); - } - return false; - } - private void freeAllContextForIndex(Index index) { assert index != null; for (ReaderContext ctx : activeReaders.values()) { @@ -1321,11 +1308,9 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.addQuerySearchResultReleasable(aggContext); try { final AggregatorFactories factories = source.aggregations().build(aggContext, null); - final Supplier supplier = () -> aggReduceContextBuilder( - context::isCancelled, - source.aggregations() + context.aggregations( + new SearchContextAggregations(factories, () -> aggReduceContextBuilder(context::isCancelled, source.aggregations())) ); - context.aggregations(new SearchContextAggregations(factories, supplier)); } catch (IOException e) { throw new AggregationInitializationException("Failed to create aggregators", e); } @@ -1465,7 +1450,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc * Shortcut ids to load, we load only "from" and up to "size". 
The phase controller * handles this as well since the result is always size * shards for Q_T_F */ - private static void shortcutDocIdsToLoad(SearchContext context) { + private static int[] shortcutDocIdsToLoad(SearchContext context) { final int[] docIdsToLoad; int docsOffset = 0; final Suggest suggest = context.queryResult().suggest(); @@ -1503,10 +1488,10 @@ private static void shortcutDocIdsToLoad(SearchContext context) { docIdsToLoad[docsOffset++] = option.getDoc().doc; } } - context.docIdsToLoad(docIdsToLoad); + return docIdsToLoad; } - private static void processScroll(InternalScrollSearchRequest request, ReaderContext reader, SearchContext context) { + private static void processScroll(InternalScrollSearchRequest request, SearchContext context) { // process scroll context.from(context.from() + context.size()); context.scrollContext().scroll = request.scroll(); @@ -1584,14 +1569,6 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set listener) { - try { - listener.onResponse(canMatch(request)); - } catch (IOException e) { - listener.onFailure(e); - } - } - public void canMatch(CanMatchNodeRequest request, ActionListener listener) { final List shardSearchRequests = request.createShardSearchRequests(); final List responses = new ArrayList<>(shardSearchRequests.size()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index 6aa545c981fa3..defbb0849bb47 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -25,6 +25,7 @@ import java.util.Optional; import java.util.Set; import java.util.function.Consumer; +import java.util.function.ToLongFunction; /** * A factory that knows how to create an {@link Aggregator} of a specific type. @@ -223,12 +224,12 @@ public boolean isInSortOrderExecutionRequired() { * Return false if this aggregation or any of the child aggregations does not support parallel collection. * As a result, a request including such aggregation is always executed sequentially despite concurrency is enabled for the query phase. */ - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { if (isInSortOrderExecutionRequired()) { return false; } for (AggregationBuilder builder : factoriesBuilder.getAggregatorFactories()) { - if (builder.supportsParallelCollection() == false) { + if (builder.supportsParallelCollection(fieldCardinalityResolver) == false) { return false; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index be109b2909bcc..795f51a729ed6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -142,8 +142,9 @@ public final Function pointReaderIfAvailable(ValuesSourceConfig * @return the cumulative size in bytes allocated by this aggregator to service this request */ protected long addRequestCircuitBreakerBytes(long bytes) { - // Only use the potential to circuit break if bytes are being incremented - if (bytes > 0) { + // Only use the potential to circuit break if bytes are being incremented, In the case of 0 + // bytes, it will trigger the parent circuit breaker. 
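+        // (addEstimateBytesAndMaybeBreak runs the breaker's check even for a zero-byte
+        // delta; the 0-byte calls added elsewhere in this change rely on exactly that)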
+        if (bytes >= 0) {
             context.breaker().addEstimateBytesAndMaybeBreak(bytes, "<agg [" + name + "]>");
         } else {
             context.breaker().addWithoutBreaking(bytes);
@@ -267,8 +268,8 @@ public Aggregator[] subAggregators() {
     public Aggregator subAggregator(String aggName) {
         if (subAggregatorbyName == null) {
             subAggregatorbyName = Maps.newMapWithExpectedSize(subAggregators.length);
-            for (int i = 0; i < subAggregators.length; i++) {
-                subAggregatorbyName.put(subAggregators[i].name(), subAggregators[i]);
+            for (Aggregator subAggregator : subAggregators) {
+                subAggregatorbyName.put(subAggregator.name(), subAggregator);
             }
         }
         return subAggregatorbyName.get(aggName);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
index 0738303020de5..7b7c41165b51e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
@@ -42,6 +42,7 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.function.ToLongFunction;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -338,9 +339,9 @@ public boolean isInSortOrderExecutionRequired() {
      * As a result, a request including such aggregation is always executed sequentially despite concurrency is enabled for the query
      * phase.
      */
-    public boolean supportsParallelCollection() {
+    public boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) {
         for (AggregationBuilder builder : aggregationBuilders) {
-            if (builder.supportsParallelCollection() == false) {
+            if (builder.supportsParallelCollection(fieldCardinalityResolver) == false) {
                 return false;
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java
index c9f937b489a73..ff1ca58d351e3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java
@@ -26,6 +26,18 @@ public abstract class InternalMultiBucketAggregation<
     A extends InternalMultiBucketAggregation,
     B extends InternalMultiBucketAggregation.InternalBucket> extends InternalAggregation implements MultiBucketsAggregation {
+    /**
+     * When we pre-count the empty buckets we report them periodically
+     * because you can configure the date_histogram to create an astounding
+     * number of buckets. It'd take a while to count that high only to abort.
+     * So we report every couple thousand buckets. It'd be simpler to report
+     * every single bucket we plan to allocate one at a time but that'd cause
+     * needless overhead on the circuit breakers. Counting a couple thousand
+     * buckets is plenty fast to fail this quickly in pathological cases and
+     * plenty large to keep the overhead minimal.
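+     *
+     * Reduce implementations are expected to check this constant and call
+     * reduceContext.consumeBucketsAndMaybeBreak(count) once the pending count
+     * reaches it, as the InternalHistogram reduce path in this change does.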
+ */ + protected static final int REPORT_EMPTY_EVERY = 10_000; + public InternalMultiBucketAggregation(String name, Map metadata) { super(name, metadata); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java index d950706b46b82..15d4a03be81f2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -289,7 +289,7 @@ byte id() { @Override public Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator) { Comparator comparator = comparator(); - return (lhs, rhs) -> comparator.compare(lhs, rhs); + return comparator::compare; } @Override @@ -388,7 +388,6 @@ private static Comparator comparingKeys() { /** * @return compare by {@link Bucket#getKey()} that will be in the bucket once it is reduced */ - @SuppressWarnings("unchecked") private static Comparator> comparingDelayedKeys() { return DelayedBucket::compareKey; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java index e98762f462243..b956658f1226d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java @@ -183,17 +183,16 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx) ); } } - switch (leafCollectors.size()) { - case 0: + return switch (leafCollectors.size()) { + case 0 -> { if (terminateIfNoop) { throw new CollectionTerminatedException(); } - return LeafBucketCollector.NO_OP_COLLECTOR; - case 1: - return leafCollectors.get(0); - default: - return new MultiLeafBucketCollector(leafCollectors, cacheScores); - } + yield LeafBucketCollector.NO_OP_COLLECTOR; + } + case 1 -> leafCollectors.get(0); + default -> new MultiLeafBucketCollector(leafCollectors, cacheScores); + }; } private static class MultiLeafBucketCollector extends LeafBucketCollector { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java index 9980918badfd5..61427b446cb6d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java @@ -102,7 +102,7 @@ static class PriorityQueueTopBucketBuilder= ArrayUtil.MAX_ARRAY_LENGTH) { throw new IllegalArgumentException("can't reduce more than [" + ArrayUtil.MAX_ARRAY_LENGTH + "] buckets"); } - queue = new PriorityQueue>(size) { + queue = new PriorityQueue<>(size) { private final Comparator> comparator = order.delayedBucketComparator(); @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index b33abb0f95824..7c3c6f8397979 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -40,11 +40,7 @@ * this collector. 
*/ public class BestBucketsDeferringCollector extends DeferringBucketCollector { - static class Entry { - final AggregationExecutionContext aggCtx; - final PackedLongValues docDeltas; - final PackedLongValues buckets; - + record Entry(AggregationExecutionContext aggCtx, PackedLongValues docDeltas, PackedLongValues buckets) { Entry(AggregationExecutionContext aggCtx, PackedLongValues docDeltas, PackedLongValues buckets) { this.aggCtx = Objects.requireNonNull(aggCtx); this.docDeltas = Objects.requireNonNull(docDeltas); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index 06456b2396522..ec8117cf03135 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -223,9 +223,9 @@ protected final void buildSubAggsForAllBuckets( } InternalAggregations[] results = buildSubAggsForBuckets(bucketOrdsToCollect); s = 0; - for (int r = 0; r < buckets.length; r++) { - for (int b = 0; b < buckets[r].length; b++) { - setAggs.accept(buckets[r][b], results[s++]); + for (B[] bucket : buckets) { + for (int b = 0; b < bucket.length; b++) { + setAggs.accept(bucket[b], results[s++]); } } } @@ -330,8 +330,8 @@ protected final InternalAggregation[] buildAggregationsForVariableBuckets( } long[] bucketOrdsToCollect = new long[(int) totalOrdsToCollect]; int b = 0; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); + for (long owningBucketOrd : owningBucketOrds) { + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); while (ordsEnum.next()) { bucketOrdsToCollect[b++] = ordsEnum.ord(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index ca88d50898763..59fec0dd1540a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.ToLongFunction; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; @@ -302,4 +303,14 @@ public boolean equals(Object obj) { public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; } + + @Override + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + for (CompositeValuesSourceBuilder source : sources) { + if (source.supportsParallelCollection(fieldCardinalityResolver) == false) { + return false; + } + } + return super.supportsParallelCollection(fieldCardinalityResolver); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index dff95332d3f16..cee90f55597b2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -256,22 +256,19 @@ private void finishLeaf() { /** Return true if the provided field may have multiple values per document in the leaf **/ private static boolean isMaybeMultivalued(LeafReaderContext context, SortField sortField) throws IOException { SortField.Type type = IndexSortConfig.getSortFieldType(sortField); - switch (type) { - case STRING: + return switch (type) { + case STRING -> { final SortedSetDocValues v1 = context.reader().getSortedSetDocValues(sortField.getField()); - return v1 != null && DocValues.unwrapSingleton(v1) == null; - - case DOUBLE: - case FLOAT: - case LONG: - case INT: + yield v1 != null && DocValues.unwrapSingleton(v1) == null; + } + case DOUBLE, FLOAT, LONG, INT -> { final SortedNumericDocValues v2 = context.reader().getSortedNumericDocValues(sortField.getField()); - return v2 != null && DocValues.unwrapSingleton(v2) == null; - - default: + yield v2 != null && DocValues.unwrapSingleton(v2) == null; + } + default -> // we have no clue whether the field is multi-valued or not so we assume it is. - return true; - } + true; + }; } /** @@ -631,13 +628,5 @@ public void collectDebugInfo(BiConsumer add) { } } - private static class Entry { - final AggregationExecutionContext aggCtx; - final DocIdSet docIdSet; - - Entry(AggregationExecutionContext aggCtx, DocIdSet docIdSet) { - this.aggCtx = aggCtx; - this.docIdSet = docIdSet; - } - } + private record Entry(AggregationExecutionContext aggCtx, DocIdSet docIdSet) {} } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java index 2c4eb02dfa6d6..0a7f6a26f580b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -29,7 +29,7 @@ */ final class CompositeValuesCollectorQueue extends PriorityQueue implements Releasable { private class Slot { - int value; + final int value; Slot(int initial) { this.value = initial; @@ -83,7 +83,7 @@ private interface CompetitiveBoundsChangedListener { // tracking the highest competitive value. if (arrays[0] instanceof GlobalOrdinalValuesSource globalOrdinalValuesSource) { if (shouldApplyGlobalOrdinalDynamicPruningForLeadingSource(sources, size, indexReader)) { - competitiveBoundsChangedListener = topSlot -> globalOrdinalValuesSource.updateHighestCompetitiveValue(topSlot); + competitiveBoundsChangedListener = globalOrdinalValuesSource::updateHighestCompetitiveValue; } else { competitiveBoundsChangedListener = null; } @@ -207,8 +207,8 @@ long getDocCount(int slot) { * Copies the current value in slot. 
     */
    private void copyCurrent(int slot, long value) {
-        for (int i = 0; i < arrays.length; i++) {
-            arrays[i].copyCurrent(slot);
+        for (SingleDimensionValuesSource<?> array : arrays) {
+            array.copyCurrent(slot);
         }
         docCounts = bigArrays.grow(docCounts, slot + 1);
         docCounts.set(slot, value);
@@ -238,12 +238,12 @@ int compare(int slot1, int slot2) {
      */
     boolean equals(int slot1, int slot2) {
         assert slot2 != CANDIDATE_SLOT;
-        for (int i = 0; i < arrays.length; i++) {
+        for (SingleDimensionValuesSource<?> array : arrays) {
             final int cmp;
             if (slot1 == CANDIDATE_SLOT) {
-                cmp = arrays[i].compareCurrent(slot2);
+                cmp = array.compareCurrent(slot2);
             } else {
-                cmp = arrays[i].compare(slot1, slot2);
+                cmp = array.compare(slot1, slot2);
             }
             if (cmp != 0) {
                 return false;
@@ -257,8 +257,8 @@ boolean equals(int slot1, int slot2) {
      */
     int hashCode(int slot) {
         int result = 1;
-        for (int i = 0; i < arrays.length; i++) {
-            result = 31 * result + (slot == CANDIDATE_SLOT ? arrays[i].hashCodeCurrent() : arrays[i].hashCode(slot));
+        for (SingleDimensionValuesSource<?> array : arrays) {
+            result = 31 * result + (slot == CANDIDATE_SLOT ? array.hashCodeCurrent() : array.hashCode(slot));
         }
         return result;
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java
index 6bbebc0ec9e5e..af7e450ac8bda 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.time.ZoneId;
 import java.util.Objects;
+import java.util.function.ToLongFunction;
 
 /**
  * A {@link ValuesSource} builder for {@link CompositeAggregationBuilder}
@@ -325,4 +326,12 @@ public final CompositeValuesSourceConfig build(AggregationContext context) throw
     protected ZoneId timeZone() {
         return null;
     }
+
+    /**
+     * Return false if this composite source does not support parallel collection. As a result, a request including such an
+     * aggregation is always executed sequentially even if concurrency is enabled for the query phase.
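+     * Script-backed sources, and field-backed sources whose cardinality cannot be resolved
+     * or is too high for key-ordered concurrency, are expected to return false; see the
+     * TermsValuesSourceBuilder override in this change.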
+ */ + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 927104a92deb2..f2c601e412f92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -628,10 +628,10 @@ public Object get(Object key) { @Override public Set> entrySet() { - return new AbstractSet>() { + return new AbstractSet<>() { @Override public Iterator> iterator() { - return new Iterator>() { + return new Iterator<>() { int pos = 0; @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 3d79509ad9377..ca9968834e611 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -43,7 +43,7 @@ class LongValuesSource extends SingleDimensionValuesSource { private final CheckedFunction docValuesFunc; private final LongUnaryOperator rounding; - private BitArray bits; + private final BitArray bits; private LongArray values; private long currentValue; private boolean missingCurrentValue; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java index 0c3ec9a521b8e..847e35cf0d4ea 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java @@ -32,7 +32,7 @@ public class ParsedComposite extends ParsedMultiBucketAggregation ParsedComposite.ParsedBucket.fromXContent(parser), parser -> null); + declareMultiBucketAggregationFields(PARSER, ParsedBucket::fromXContent, parser -> null); } private Map afterKey; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index e5e89d94c803b..18591b1f3719b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -29,6 +30,7 @@ import java.util.List; import java.util.function.LongConsumer; import java.util.function.LongUnaryOperator; +import java.util.function.ToLongFunction; /** * A {@link CompositeValuesSourceBuilder} that builds a {@link ValuesSource} from a {@link 
Script} or @@ -215,4 +217,13 @@ protected CompositeValuesSourceConfig innerBuild(ValuesSourceRegistry registry, return registry.getAggregator(REGISTRY_KEY, config) .apply(config, name, script() != null, format(), missingBucket(), missingOrder(), order()); } + + @Override + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { + if (script() == null) { + long cardinality = fieldCardinalityResolver.applyAsLong(field()); + return cardinality != -1 && cardinality <= TermsAggregationBuilder.KEY_ORDER_CONCURRENCY_THRESHOLD; + } + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java index e0792fca6c28f..69dcc8d3da117 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java @@ -140,7 +140,7 @@ public static FiltersAggregator build( Map metadata ) throws IOException { FilterByFilterAggregator.AdapterBuilder filterByFilterBuilder = - new FilterByFilterAggregator.AdapterBuilder( + new FilterByFilterAggregator.AdapterBuilder<>( name, keyed, keyedBucket, @@ -214,7 +214,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I (offsetInOwningOrd, docCount, subAggregationResults) -> { if (offsetInOwningOrd < filters.size()) { return new InternalFilters.InternalBucket( - filters.get(offsetInOwningOrd).key().toString(), + filters.get(offsetInOwningOrd).key(), docCount, subAggregationResults, keyed, @@ -232,13 +232,7 @@ public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(filters.size() + (otherBucketKey == null ? 0 : 1)); for (QueryToFilterAdapter filter : filters) { - InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket( - filter.key().toString(), - 0, - subAggs, - keyed, - keyedBucket - ); + InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(filter.key(), 0, subAggs, keyed, keyedBucket); buckets.add(bucket); } @@ -300,11 +294,17 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt final int numFilters = filters().size(); List filterWrappers = new ArrayList<>(); long totalCost = 0; + // trigger the parent circuit breaker to make sure we have enough heap to build the first scorer. + // note we might still fail if the scorer is huge. 
+ addRequestCircuitBreakerBytes(0L); for (int filterOrd = 0; filterOrd < numFilters; filterOrd++) { Scorer randomAccessScorer = filters().get(filterOrd).randomAccessScorer(aggCtx.getLeafReaderContext()); if (randomAccessScorer == null) { continue; } + // scorer can take a fair amount of heap, and we have no means to estimate the size, so + // we trigger the parent circuit breaker to at least fail if we are running out of heap + addRequestCircuitBreakerBytes(0L); totalCost += randomAccessScorer.iterator().cost(); filterWrappers.add( randomAccessScorer.twoPhaseIterator() == null diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index ff9495ca4d825..8bc1e3d17642a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -64,7 +64,7 @@ public static ObjectParser crea parser.declareInt(GeoGridAggregationBuilder::size, FIELD_SIZE); parser.declareInt(GeoGridAggregationBuilder::shardSize, FIELD_SHARD_SIZE); parser.declareField( - (p, builder, context) -> { builder.setGeoBoundingBox(GeoBoundingBox.parseBoundingBox(p)); }, + (p, builder, context) -> builder.setGeoBoundingBox(GeoBoundingBox.parseBoundingBox(p)), GeoBoundingBox.BOUNDS_FIELD, org.elasticsearch.xcontent.ObjectParser.ValueType.OBJECT ); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java index e5fd0aa10ced2..52f63bf24be11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileUtils.java @@ -203,7 +203,7 @@ public static int[] parseHash(String hashAsString) { public static String stringEncode(long hash) { final int[] res = parseHash(hash); validateZXY(res[0], res[1], res[2]); - return "" + res[0] + "/" + res[1] + "/" + res[2]; + return res[0] + "/" + res[1] + "/" + res[2]; } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index 2371506082f1b..48b361592519c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -79,7 +79,7 @@ public String getPreferredName() { return preferredName; } - private String preferredName; + private final String preferredName; IntervalTypeEnum(String preferredName) { this.preferredName = preferredName; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index ed883a4b04d6b..4ffc9abdc2202 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -373,18 +373,6 @@ protected Bucket reduceBucket(List buckets, 
AggregationReduceContext con return createBucket(buckets.get(0).key, docCount, aggs); } - /** - * When we pre-count the empty buckets we report them periodically - * because you can configure the date_histogram to create an astounding - * number of buckets. It'd take a while to count that high only to abort. - * So we report every couple thousand buckets. It's be simpler to report - * every single bucket we plan to allocate one at a time but that'd cause - * needless overhead on the circuit breakers. Counting a couple thousand - * buckets is plenty fast to fail this quickly in pathological cases and - * plenty large to keep the overhead minimal. - */ - private static final int REPORT_EMPTY_EVERY = 10_000; - private void addEmptyBuckets(List list, AggregationReduceContext reduceContext) { /* * Make sure we have space for the empty buckets we're going to add by diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index caef13221b0f3..6ce723d12db26 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -291,10 +291,11 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent(histogram.buckets.iterator())); + pq.add(new IteratorAndCurrent<>(histogram.buckets.iterator())); } } + int consumeBucketCount = 0; List reducedBuckets = new ArrayList<>(); if (pq.size() > 0) { // list of buckets coming from different shards that have the same key @@ -310,6 +311,10 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent= minDocCount || reduceContext.isFinalReduce() == false) { reducedBuckets.add(reduced); + if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); + consumeBucketCount = 0; + } } currentBuckets.clear(); key = top.current().key; @@ -330,10 +335,15 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent= minDocCount || reduceContext.isFinalReduce() == false) { reducedBuckets.add(reduced); + if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); + consumeBucketCount = 0; + } } } } + reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); return reducedBuckets; } @@ -358,18 +368,6 @@ private double round(double key) { return Math.floor((key - emptyBucketInfo.offset) / emptyBucketInfo.interval) * emptyBucketInfo.interval + emptyBucketInfo.offset; } - /** - * When we pre-count the empty buckets we report them periodically - * because you can configure the histogram to create more buckets than - * there are atoms in the universe. It'd take a while to count that high - * only to abort. So we report every couple thousand buckets. It's be - * simpler to report every single bucket we plan to allocate one at a time - * but that'd cause needless overhead on the circuit breakers. Counting a - * couple thousand buckets is plenty fast to fail this quickly in - * pathological cases and plenty large to keep the overhead minimal. 
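The reduce path above now applies exactly that batching: buckets are counted locally and handed to the breaker in chunks of REPORT_EMPTY_EVERY, with a final flush for the remainder. A hedged sketch of the pattern (the loop and bucket list are illustrative; consumeBucketsAndMaybeBreak is the real AggregationReduceContext hook):

    // Count buckets locally, report in chunks: frequent enough to fail fast on
    // pathological bucket counts, coarse enough to keep breaker overhead low.
    int pending = 0;
    for (InternalHistogram.Bucket bucket : reducedBuckets) { // illustrative loop
        if (++pending >= REPORT_EMPTY_EVERY) {
            reduceContext.consumeBucketsAndMaybeBreak(pending);
            pending = 0;
        }
    }
    reduceContext.consumeBucketsAndMaybeBreak(pending); // flush the remainder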
- */ - private static final int REPORT_EMPTY_EVERY = 10_000; - private void addEmptyBuckets(List list, AggregationReduceContext reduceContext) { /* * Make sure we have space for the empty buckets we're going to add by @@ -377,7 +375,7 @@ private void addEmptyBuckets(List list, AggregationReduceContext reduceC * consumeBucketsAndMaybeBreak. */ class Counter implements DoubleConsumer { - private int size = list.size(); + private int size = 0; @Override public void accept(double key) { @@ -456,11 +454,9 @@ private void iterateEmptyBuckets(List list, ListIterator iter, D @Override public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { List reducedBuckets = reduceBuckets(aggregations, reduceContext); - boolean alreadyAccountedForBuckets = false; if (reduceContext.isFinalReduce()) { if (minDocCount == 0) { addEmptyBuckets(reducedBuckets, reduceContext); - alreadyAccountedForBuckets = true; } if (InternalOrder.isKeyDesc(order)) { // we just need to reverse here... @@ -474,9 +470,6 @@ public InternalAggregation reduce(List aggregations, Aggreg CollectionUtil.introSort(reducedBuckets, order.comparator()); } } - if (false == alreadyAccountedForBuckets) { - reduceContext.consumeBucketsAndMaybeBreak(reducedBuckets.size()); - } return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, getMetadata()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java index ba33373354f3e..de7f29d785c75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java @@ -36,7 +36,7 @@ public List getBuckets() { return buckets; } - private static ObjectParser PARSER = new ObjectParser<>( + private static final ObjectParser PARSER = new ObjectParser<>( ParsedVariableWidthHistogram.class.getSimpleName(), true, ParsedVariableWidthHistogram::new diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index d89d9b07e57bc..945ecd7424de3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ -82,7 +82,7 @@ private class BufferValuesPhase extends CollectionPhase { private DoubleArray buffer; private int bufferSize; - private int bufferLimit; + private final int bufferLimit; private MergeBucketsPhase mergeBucketsPhase; BufferValuesPhase(int bufferLimit) { @@ -97,7 +97,7 @@ public CollectionPhase collectValue(LeafBucketCollector sub, int doc, double val if (bufferSize < bufferLimit) { // Add to the buffer i.e store the doc in a new bucket buffer = bigArrays().grow(buffer, bufferSize + 1); - buffer.set((long) bufferSize, val); + buffer.set(bufferSize, val); collectBucket(sub, doc, bufferSize); bufferSize += 1; } @@ -179,7 +179,7 @@ private class MergeBucketsPhase extends CollectionPhase { * Sorts the indices of values by their underlying value * This will produce a merge map whose application will sort values */ - 
private class ClusterSorter extends InPlaceMergeSorter { + private static class ClusterSorter extends InPlaceMergeSorter { final DoubleArray values; final long[] indexes; @@ -432,7 +432,6 @@ public void close() { // Aggregation parameters private final int numBuckets; private final int shardSize; - private final int bufferLimit; private CollectionPhase collector; @@ -455,9 +454,8 @@ public void close() { this.valuesSource = (ValuesSource.Numeric) valuesSourceConfig.getValuesSource(); this.formatter = valuesSourceConfig.format(); this.shardSize = shardSize; - this.bufferLimit = initialBuffer; - collector = new BufferValuesPhase(this.bufferLimit); + collector = new BufferValuesPhase(initialBuffer); String scoringAgg = subAggsNeedScore(); String nestedAgg = descendsFromNestedAggregator(parent); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index dc0b42f507d84..cdb2ae4517a22 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -117,11 +117,7 @@ private static String key(String key, Double from, Double to) { if (key != null) { return key; } - StringBuilder sb = new StringBuilder(); - sb.append((from == null || from == 0) ? "*" : from); - sb.append("-"); - sb.append((to == null || Double.isInfinite(to)) ? "*" : to); - return sb.toString(); + return ((from == null || from == 0) ? "*" : from) + "-" + ((to == null || Double.isInfinite(to)) ? "*" : to); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java index 23105bbe2d4f3..c8588136c1d33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java @@ -63,10 +63,7 @@ public Bucket( } private static String generateKey(BytesRef from, BytesRef to, DocValueFormat format) { - StringBuilder builder = new StringBuilder().append(from == null ? "*" : format.format(from)) - .append("-") - .append(to == null ? "*" : format.format(to)); - return builder.toString(); + return (from == null ? "*" : format.format(from)) + "-" + (to == null ? "*" : format.format(to)); } private static Bucket createFromStream(StreamInput in, DocValueFormat format, boolean keyed) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index cb970fc87fd33..046d5efb97ece 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -144,10 +144,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } private static String generateKey(double from, double to, DocValueFormat format) { - StringBuilder builder = new StringBuilder().append(Double.isInfinite(from) ? "*" : format.format(from)) - .append("-") - .append(Double.isInfinite(to) ? 
"*" : format.format(to)); - return builder.toString(); + return (Double.isInfinite(from) ? "*" : format.format(from)) + "-" + (Double.isInfinite(to) ? "*" : format.format(to)); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedRange.java index 499b8c3e4f039..a12c126fb73d8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedRange.java @@ -41,7 +41,7 @@ protected static void declareParsedRangeFields( final CheckedFunction bucketParser, final CheckedFunction keyedBucketParser ) { - declareMultiBucketAggregationFields(objectParser, bucketParser::apply, keyedBucketParser::apply); + declareMultiBucketAggregationFields(objectParser, bucketParser, keyedBucketParser); } private static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 7c89061ea32f2..7d7e1a1a03bc4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -383,8 +383,16 @@ public static FromFilters adaptIntoFiltersOrNull( return null; } boolean wholeNumbersOnly = false == ((ValuesSource.Numeric) valuesSourceConfig.getValuesSource()).isFloatingPoint(); - FilterByFilterAggregator.AdapterBuilder> filterByFilterBuilder = new FilterByFilterAggregator.AdapterBuilder< - FromFilters>(name, false, false, null, context, parent, cardinality, metadata) { + FilterByFilterAggregator.AdapterBuilder> filterByFilterBuilder = new FilterByFilterAggregator.AdapterBuilder<>( + name, + false, + false, + null, + context, + parent, + cardinality, + metadata + ) { @Override protected FromFilters adapt(CheckedFunction delegate) throws IOException { @@ -547,8 +555,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(ranges.length); - for (int i = 0; i < ranges.length; i++) { - Range range = ranges[i]; + for (Range range : ranges) { org.elasticsearch.search.aggregations.bucket.range.Range.Bucket bucket = rangeFactory.createBucket( range.key, range.originalFrom, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java index 5bca7718c9e2a..1344604a8d39c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java @@ -45,7 +45,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme private final List entries = new ArrayList<>(); private BucketCollector deferred; private ObjectArray perBucketSamples; - private int shardSize; + private final int shardSize; private PerSegmentCollects perSegCollector; private final BigArrays bigArrays; private final Consumer circuitBreakerConsumer; @@ -210,7 +210,7 @@ public int 
getDocCount() { } class PerSegmentCollects extends Scorable { - private AggregationExecutionContext aggCtx; + private final AggregationExecutionContext aggCtx; int maxDocId = Integer.MIN_VALUE; private float currentScore; private int currentDocId = -1; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java index 542fcc84a6411..aed2119dec483 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java @@ -204,7 +204,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { if (lastBucket != null && cmp.compare(top.current(), lastBucket) != 0) { // the key changed so bundle up the last key's worth of buckets boolean shouldContinue = sink.apply( - new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets) + new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets) ); if (false == shouldContinue) { return; @@ -228,7 +228,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { } if (sameTermBuckets.isEmpty() == false) { - sink.apply(new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)); + sink.apply(new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets)); } } @@ -249,7 +249,7 @@ private void reduceLegacy( } for (List sameTermBuckets : bucketMap.values()) { boolean shouldContinue = sink.apply( - new DelayedBucket(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets) + new DelayedBucket<>(AbstractInternalTerms.this::reduceBucket, reduceContext, sameTermBuckets) ); if (false == shouldContinue) { return; @@ -300,7 +300,7 @@ public InternalAggregation reduce(List aggregations, Aggreg TopBucketBuilder top = TopBucketBuilder.build( getRequiredSize(), getOrder(), - removed -> { otherDocCount[0] += removed.getDocCount(); } + removed -> otherDocCount[0] += removed.getDocCount() ); thisReduceOrder = reduceBuckets(aggregations, reduceContext, bucket -> { if (bucket.getDocCount() >= getMinDocCount()) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java index 70f258e523527..524c648215345 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -170,7 +170,7 @@ public int hashCode() { private Set valids; private Set invalids; - private Long spare = new Long(0); + private final Long spare = new Long(0); private SetBackedLongFilter(int numValids, int numInvalids) { if (numValids > 0) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java index aaa9857fc1562..b41e402c029f4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java @@ -34,7 +34,6 @@ public abstract class 
InternalMappedRareTerms, protected DocValueFormat format; protected List buckets; - protected Map bucketMap; final SetBackedScalingCuckooFilter filter; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java index cffe11c5729eb..07aa318e9c487 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java @@ -213,11 +213,7 @@ public InternalAggregation reduce(List aggregations, Aggreg @SuppressWarnings("unchecked") InternalSignificantTerms terms = (InternalSignificantTerms) aggregation; for (B bucket : terms.getBuckets()) { - List existingBuckets = buckets.get(bucket.getKeyAsString()); - if (existingBuckets == null) { - existingBuckets = new ArrayList<>(aggregations.size()); - buckets.put(bucket.getKeyAsString(), existingBuckets); - } + List existingBuckets = buckets.computeIfAbsent(bucket.getKeyAsString(), k -> new ArrayList<>(aggregations.size())); // Adjust the buckets with the global stats representing the // total size of the pots from which the stats are drawn existingBuckets.add( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 85307a903a3eb..1d32251ffc33a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -93,6 +93,18 @@ public long getDocCount() { return docCount; } + public void setDocCount(long docCount) { + this.docCount = docCount; + } + + public long getBucketOrd() { + return bucketOrd; + } + + public void setBucketOrd(long bucketOrd) { + this.bucketOrd = bucketOrd; + } + @Override public long getDocCountError() { if (showDocCountError == false) { @@ -102,7 +114,7 @@ public long getDocCountError() { } @Override - protected void setDocCountError(long docCountError) { + public void setDocCountError(long docCountError) { this.docCountError = docCountError; } @@ -121,6 +133,10 @@ public Aggregations getAggregations() { return aggregations; } + public void setAggregations(InternalAggregations aggregations) { + this.aggregations = aggregations; + } + @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java index 279625654e734..6b21b11db5015 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java @@ -130,8 +130,8 @@ public Iterator keyOrderedIterator(long owningBucketOrd) { } } Iterator toReturn = new Iterator<>() { - Iterator wrapped = keySet.iterator(); - long filterOrd = owningBucketOrd; + final Iterator wrapped = keySet.iterator(); + final long filterOrd = owningBucketOrd; long next; boolean hasNext = true; diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java index 056a8a00dd72f..ce911379d9ddb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java @@ -34,6 +34,7 @@ import java.io.IOException; import java.util.Map; import java.util.Objects; +import java.util.function.ToLongFunction; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; @@ -137,7 +138,7 @@ public boolean supportsSampling() { } @Override - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { return false; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index fe27738fe7589..fa05ffbd58295 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -59,6 +59,14 @@ public Object getKey() { return getKeyAsString(); } + public BytesRef getTermBytes() { + return termBytes; + } + + public void setTermBytes(BytesRef termBytes) { + this.termBytes = termBytes; + } + // this method is needed for scripted numeric aggs @Override public Number getKeyAsNumber() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java index 0e0db3ab5054f..d1458b04f17a1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregatorFromFilters.java @@ -69,16 +69,7 @@ static StringTermsAggregatorFromFilters adaptIntoFiltersOrNull( return null; } FilterByFilterAggregator.AdapterBuilder filterByFilterBuilder = - new FilterByFilterAggregator.AdapterBuilder( - name, - false, - false, - null, - context, - parent, - cardinality, - metadata - ) { + new FilterByFilterAggregator.AdapterBuilder<>(name, false, false, null, context, parent, cardinality, metadata) { @Override protected StringTermsAggregatorFromFilters adapt( CheckedFunction delegate @@ -164,7 +155,7 @@ protected InternalAggregation adapt(InternalAggregation delegateResult) throws I } TermsEnum terms = valuesSupplier.get().termsEnum(); if (filters.getBuckets().size() > bucketCountThresholds.getShardSize()) { - PriorityQueue queue = new PriorityQueue(bucketCountThresholds.getShardSize()) { + PriorityQueue queue = new PriorityQueue<>(bucketCountThresholds.getShardSize()) { private final Comparator comparator = order.comparator(); @Override @@ -195,7 +186,7 @@ protected boolean lessThan(OrdBucket a, OrdBucket b) { for (OrdBucket b : queue) { buckets.add(buildBucket(b, terms)); } - Collections.sort(buckets, reduceOrder.comparator()); + buckets.sort(reduceOrder.comparator()); } else { /* * Note for the curious: you can just use a for loop to iterate @@ -217,7 +208,7 @@ protected boolean lessThan(OrdBucket a, OrdBucket b) { } 
buckets.add(buildBucket(b, terms)); } - Collections.sort(buckets, reduceOrder.comparator()); + buckets.sort(reduceOrder.comparator()); } return new StringTerms( filters.getName(), diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index ebc6b2c1cc70c..68263e2d72b9c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -35,8 +35,11 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.ToLongFunction; public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<TermsAggregationBuilder> { + public static final int KEY_ORDER_CONCURRENCY_THRESHOLD = 50; + public static final String NAME = "terms"; public static final ValuesSourceRegistry.RegistryKey<TermsAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>( NAME, @@ -106,13 +109,13 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { private IncludeExclude includeExclude = null; private String executionHint = null; private SubAggCollectionMode collectMode = null; - private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds( - DEFAULT_BUCKET_COUNT_THRESHOLDS - ); + private final TermsAggregator.BucketCountThresholds bucketCountThresholds; + private boolean showTermDocCountError = false; public TermsAggregationBuilder(String name) { super(name); + this.bucketCountThresholds = new TermsAggregator.BucketCountThresholds(DEFAULT_BUCKET_COUNT_THRESHOLDS); } protected TermsAggregationBuilder( @@ -135,7 +138,34 @@ public boolean supportsSampling() { } @Override - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction<String> fieldCardinalityResolver) { + /* + * We parallelize only if the cardinality of the field is lower than the shard size, to minimize precision issues. + * When ordered by term, we still take cardinality into account to avoid overhead that concurrency may cause against + * high cardinality fields. + */ + if (script() == null + && (executionHint == null || executionHint.equals(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()))) { + long cardinality = fieldCardinalityResolver.applyAsLong(field()); + if (supportsParallelCollection(cardinality, order, bucketCountThresholds)) { + return super.supportsParallelCollection(fieldCardinalityResolver); + } + } + return false; + } + + /** + * Whether a terms aggregation with the provided order and bucket count thresholds against a field + * with the given cardinality should be executed concurrently. 
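A hedged usage sketch of the new signature: the caller supplies a resolver from field name to cardinality, returning -1 when the cardinality is unknown, and the builder decides whether concurrent collection pays off. The resolver body below is hypothetical; in the real search path it would be backed by index statistics:

    import java.util.function.ToLongFunction;

    // -1 means "cardinality unknown", which disables parallel collection.
    ToLongFunction<String> resolver = fieldName -> lookupCardinality(fieldName); // lookupCardinality is a hypothetical helper

    TermsAggregationBuilder terms = new TermsAggregationBuilder("genres").field("genre");
    boolean parallel = terms.supportsParallelCollection(resolver);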
+ */ + public static boolean supportsParallelCollection(long cardinality, BucketOrder order, BucketCountThresholds bucketCountThresholds) { + if (cardinality != -1) { + if (InternalOrder.isKeyOrder(order)) { + return cardinality <= KEY_ORDER_CONCURRENCY_THRESHOLD; + } + BucketCountThresholds adjusted = TermsAggregatorFactory.adjustBucketCountThresholds(bucketCountThresholds, order); + return cardinality <= adjusted.getShardSize(); + } return false; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 31c6a4a7e0430..e17cd828a24d0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -277,39 +277,45 @@ private static boolean isAggregationSort(BucketOrder order) { } } - @Override - protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) - throws IOException { - BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(this.bucketCountThresholds); + public static BucketCountThresholds adjustBucketCountThresholds(BucketCountThresholds bucketCountThresholds, BucketOrder order) { + BucketCountThresholds newBucketCountThresholds = new BucketCountThresholds(bucketCountThresholds); if (InternalOrder.isKeyOrder(order) == false - && bucketCountThresholds.getShardSize() == TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.shardSize()) { + && newBucketCountThresholds.getShardSize() == TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.shardSize()) { // The user has not made a shardSize selection. 
Use default // heuristic to avoid any wrong-ranking caused by distributed // counting - bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize())); + newBucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(newBucketCountThresholds.getRequiredSize())); } + newBucketCountThresholds.ensureValidity(); + return newBucketCountThresholds; + } + + @Override + protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) + throws IOException { + + BucketCountThresholds adjusted = adjustBucketCountThresholds(this.bucketCountThresholds, order); // If min_doc_count and shard_min_doc_count is provided, we do not support them being larger than 1 // This is because we cannot be sure about their relative scale when sampled if (getSamplingContext().map(SamplingContext::isSampled).orElse(false)) { - if (bucketCountThresholds.getMinDocCount() > 1 || bucketCountThresholds.getShardMinDocCount() > 1) { + if (adjusted.getMinDocCount() > 1 || adjusted.getShardMinDocCount() > 1) { throw new ElasticsearchStatusException( "aggregation [{}] is within a sampling context; " + "min_doc_count, provided [{}], and min_shard_doc_count, provided [{}], cannot be greater than 1", RestStatus.BAD_REQUEST, name(), - bucketCountThresholds.getMinDocCount(), - bucketCountThresholds.getShardMinDocCount() + adjusted.getMinDocCount(), + adjusted.getShardMinDocCount() ); } } - bucketCountThresholds.ensureValidity(); return aggregatorSupplier.build( name, factories, config, order, - bucketCountThresholds, + adjusted, includeExclude, executionHint, context, @@ -468,7 +474,7 @@ Aggregator create( && ordinalsValuesSource.supportsGlobalOrdinalsMapping() && // we use the static COLLECT_SEGMENT_ORDS to allow tests to force specific optimizations - (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS.booleanValue() : ratio <= 0.5 && maxOrd <= 2048)) { + (COLLECT_SEGMENT_ORDS != null ? COLLECT_SEGMENT_ORDS : ratio <= 0.5 && maxOrd <= 2048)) { /* * We can use the low cardinality execution mode iff this aggregator: * - has no sub-aggregator AND @@ -505,7 +511,7 @@ Aggregator create( * is only possible if we're collecting from a single * bucket. 
*/ - remapGlobalOrds = REMAP_GLOBAL_ORDS.booleanValue(); + remapGlobalOrds = REMAP_GLOBAL_ORDS; } else { remapGlobalOrds = true; if (includeExclude == null diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java index edd1b42668697..abe4987573cb9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java @@ -81,12 +81,7 @@ private Object getComparableData(long bucketOrd) { AbstractHyperLogLog.RunLenIterator iterator = getHyperLogLog(bucketOrd); while (iterator.next()) { byte runLength = iterator.value(); - Integer numOccurances = values.get(runLength); - if (numOccurances == null) { - values.put(runLength, 1); - } else { - values.put(runLength, numOccurances + 1); - } + values.merge(runLength, 1, Integer::sum); } return values; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java index 9b4656ee7cf7e..a77cd495f87db 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java @@ -121,7 +121,7 @@ public double value(String name) { @Override public Iterable valueNames() { - return Arrays.stream(getKeys()).mapToObj(d -> String.valueOf(d)).toList(); + return Arrays.stream(getKeys()).mapToObj(String::valueOf).toList(); } public DocValueFormat formatter() { @@ -210,9 +210,9 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th DoubleHistogram state = getState(); if (keyed) { builder.startObject(CommonFields.VALUES.getPreferredName()); - for (int i = 0; i < keys.length; ++i) { - String key = String.valueOf(keys[i]); - double value = value(keys[i]); + for (double v : keys) { + String key = String.valueOf(v); + double value = value(v); builder.field(key, state.getTotalCount() == 0 ? null : value); if (format != DocValueFormat.RAW && state.getTotalCount() > 0) { builder.field(key + "_as_string", format.format(value).toString()); @@ -221,10 +221,10 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.endObject(); } else { builder.startArray(CommonFields.VALUES.getPreferredName()); - for (int i = 0; i < keys.length; i++) { - double value = value(keys[i]); + for (double key : keys) { + double value = value(key); builder.startObject(); - builder.field(CommonFields.KEY.getPreferredName(), keys[i]); + builder.field(CommonFields.KEY.getPreferredName(), key); builder.field(CommonFields.VALUE.getPreferredName(), state.getTotalCount() == 0 ? 
null : value); if (format != DocValueFormat.RAW && state.getTotalCount() > 0) { builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java index 08588473c61d1..3ae609689ed7a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java @@ -101,7 +101,7 @@ public double value(String name) { @Override public Iterable valueNames() { - return Arrays.stream(getKeys()).mapToObj(d -> String.valueOf(d)).toList(); + return Arrays.stream(getKeys()).mapToObj(String::valueOf).toList(); } public abstract double value(double key); @@ -188,9 +188,9 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th TDigestState state = getState(); if (keyed) { builder.startObject(CommonFields.VALUES.getPreferredName()); - for (int i = 0; i < keys.length; ++i) { - String key = String.valueOf(keys[i]); - double value = value(keys[i]); + for (double v : keys) { + String key = String.valueOf(v); + double value = value(v); builder.field(key, state.size() == 0 ? null : value); if (format != DocValueFormat.RAW && state.size() > 0) { builder.field(key + "_as_string", format.format(value).toString()); @@ -199,10 +199,10 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.endObject(); } else { builder.startArray(CommonFields.VALUES.getPreferredName()); - for (int i = 0; i < keys.length; i++) { - double value = value(keys[i]); + for (double key : keys) { + double value = value(key); builder.startObject(); - builder.field(CommonFields.KEY.getPreferredName(), keys[i]); + builder.field(CommonFields.KEY.getPreferredName(), key); builder.field(CommonFields.VALUE.getPreferredName(), state.size() == 0 ? 
null : value); if (format != DocValueFormat.RAW && state.size() > 0) { builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index f27efafaf64cf..bbeebf858073a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -111,7 +111,7 @@ public static ExecutionMode fromString(String value) { } } - boolean isHeuristicBased; + final boolean isHeuristicBased; ExecutionMode(boolean isHeuristicBased) { this.isHeuristicBased = isHeuristicBased; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java index b469c24175715..cecd75941bcab 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java @@ -140,20 +140,11 @@ public boolean hasMetric(String name) { public double metric(String name, long owningBucketOrd) { if (owningBucketOrd >= counts.size()) { return switch (InternalExtendedStats.Metrics.resolve(name)) { - case count -> 0; - case sum -> 0; + case count, sum_of_squares, sum -> 0; case min -> Double.POSITIVE_INFINITY; case max -> Double.NEGATIVE_INFINITY; - case avg -> Double.NaN; - case sum_of_squares -> 0; - case variance -> Double.NaN; - case variance_population -> Double.NaN; - case variance_sampling -> Double.NaN; - case std_deviation -> Double.NaN; - case std_deviation_population -> Double.NaN; - case std_deviation_sampling -> Double.NaN; - case std_upper -> Double.NaN; - case std_lower -> Double.NaN; + case avg, variance, variance_population, variance_sampling, std_deviation, std_deviation_population, std_deviation_sampling, + std_upper, std_lower -> Double.NaN; default -> throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); }; } @@ -167,9 +158,7 @@ public double metric(String name, long owningBucketOrd) { case variance -> variance(owningBucketOrd); case variance_population -> variancePopulation(owningBucketOrd); case variance_sampling -> varianceSampling(owningBucketOrd); - case std_deviation -> Math.sqrt(variance(owningBucketOrd)); - case std_deviation_population -> Math.sqrt(variance(owningBucketOrd)); - case std_deviation_sampling -> Math.sqrt(varianceSampling(owningBucketOrd)); + case std_deviation, std_deviation_population, std_deviation_sampling -> Math.sqrt(variance(owningBucketOrd)); case std_upper -> (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) + (Math.sqrt(variance(owningBucketOrd)) * this.sigma); case std_lower -> (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) - (Math.sqrt(variance(owningBucketOrd)) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java index b44bc69ae68e6..d04a3744df4ff 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java @@ -54,7 +54,7 @@ public double metric(String name, long bucketOrd) { if (state == null) { return Double.NaN; } else { - return InternalHDRPercentileRanks.percentileRank(state, Double.valueOf(name)); + return InternalHDRPercentileRanks.percentileRank(state, Double.parseDouble(name)); } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 08d0907c2a1bd..30225263eb8b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -244,7 +244,7 @@ private static class HyperLogLogIterator implements AbstractHyperLogLog.RunLenIt private final HyperLogLog hll; int pos; - long start; + final long start; private byte value; HyperLogLogIterator(HyperLogLog hll, long bucket) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java index 18b1f44ce5d7f..77cb482edd8b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java @@ -98,7 +98,6 @@ int getSize() { @Override public InternalAggregation reduce(List aggregations, AggregationReduceContext reduceContext) { - final SearchHits[] shardHits = new SearchHits[aggregations.size()]; final int from; final int size; if (reduceContext.isFinalReduce()) { @@ -113,65 +112,66 @@ public InternalAggregation reduce(List aggregations, Aggreg final TopDocs reducedTopDocs; final TopDocs[] shardDocs; - - if (topDocs.topDocs instanceof TopFieldDocs) { - Sort sort = new Sort(((TopFieldDocs) topDocs.topDocs).fields); + final float maxScore; + if (topDocs.topDocs instanceof TopFieldDocs topFieldDocs) { shardDocs = new TopFieldDocs[aggregations.size()]; - for (int i = 0; i < shardDocs.length; i++) { - InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); - shardDocs[i] = topHitsAgg.topDocs.topDocs; - shardHits[i] = topHitsAgg.searchHits; - for (ScoreDoc doc : shardDocs[i].scoreDocs) { - doc.shardIndex = i; - } - } - reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs); + maxScore = reduceAndFindMaxScore(aggregations, shardDocs); + reducedTopDocs = TopDocs.merge(new Sort(topFieldDocs.fields), from, size, (TopFieldDocs[]) shardDocs); } else { shardDocs = new TopDocs[aggregations.size()]; - for (int i = 0; i < shardDocs.length; i++) { - InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); - shardDocs[i] = topHitsAgg.topDocs.topDocs; - shardHits[i] = topHitsAgg.searchHits; - for (ScoreDoc doc : shardDocs[i].scoreDocs) { - doc.shardIndex = i; - } - } + maxScore = reduceAndFindMaxScore(aggregations, shardDocs); reducedTopDocs = TopDocs.merge(from, size, shardDocs); } - - float maxScore = Float.NaN; - for (InternalAggregation agg : aggregations) { - InternalTopHits topHitsAgg = (InternalTopHits) agg; - if (Float.isNaN(topHitsAgg.topDocs.maxScore) == false) { - if (Float.isNaN(maxScore)) { - maxScore = topHitsAgg.topDocs.maxScore; - } else { - maxScore = Math.max(maxScore, topHitsAgg.topDocs.maxScore); - } - } - } - - final int[] tracker = new int[shardHits.length]; - 
SearchHit[] hits = new SearchHit[reducedTopDocs.scoreDocs.length]; - for (int i = 0; i < reducedTopDocs.scoreDocs.length; i++) { - ScoreDoc scoreDoc = reducedTopDocs.scoreDocs[i]; - int position; - do { - position = tracker[scoreDoc.shardIndex]++; - } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc); - hits[i] = shardHits[scoreDoc.shardIndex].getAt(position); - } assert reducedTopDocs.totalHits.relation == Relation.EQUAL_TO; + return new InternalTopHits( name, this.from, this.size, new TopDocsAndMaxScore(reducedTopDocs, maxScore), - new SearchHits(hits, reducedTopDocs.totalHits, maxScore), + extractSearchHits(aggregations, reducedTopDocs, shardDocs, maxScore), getMetadata() ); } + private static SearchHits extractSearchHits( + List aggregations, + TopDocs reducedTopDocs, + TopDocs[] shardDocs, + float maxScore + ) { + final int[] tracker = new int[aggregations.size()]; + ScoreDoc[] scoreDocs = reducedTopDocs.scoreDocs; + SearchHit[] hits = new SearchHit[scoreDocs.length]; + for (int i = 0; i < scoreDocs.length; i++) { + ScoreDoc scoreDoc = scoreDocs[i]; + int shardIndex = scoreDoc.shardIndex; + TopDocs topDocsForShard = shardDocs[shardIndex]; + int position; + do { + position = tracker[shardIndex]++; + } while (topDocsForShard.scoreDocs[position] != scoreDoc); + hits[i] = ((InternalTopHits) aggregations.get(shardIndex)).searchHits.getAt(position); + } + return new SearchHits(hits, reducedTopDocs.totalHits, maxScore); + } + + private static float reduceAndFindMaxScore(List aggregations, TopDocs[] shardDocs) { + float maxScore = Float.NaN; + for (int i = 0; i < shardDocs.length; i++) { + InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); + shardDocs[i] = topHitsAgg.topDocs.topDocs; + for (ScoreDoc doc : shardDocs[i].scoreDocs) { + doc.shardIndex = i; + } + final float max = topHitsAgg.topDocs.maxScore; + if (Float.isNaN(max) == false) { + maxScore = Float.isNaN(maxScore) ? 
max : Math.max(maxScore, max); + } + } + return maxScore; + } + @Override public InternalAggregation finalizeSampling(SamplingContext samplingContext) { return this; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java index fed48ec7640e3..24f68d87802bf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedGeoBounds.java @@ -80,11 +80,7 @@ public GeoPoint bottomRight() { static { declareAggregationFields(PARSER); - PARSER.declareObject( - (agg, bbox) -> { agg.geoBoundingBox = new GeoBoundingBox(bbox.v1(), bbox.v2()); }, - BOUNDS_PARSER, - BOUNDS_FIELD - ); + PARSER.declareObject((agg, bbox) -> agg.geoBoundingBox = new GeoBoundingBox(bbox.v1(), bbox.v2()), BOUNDS_PARSER, BOUNDS_FIELD); BOUNDS_PARSER.declareObject(constructorArg(), GEO_POINT_PARSER, TOP_LEFT_FIELD); BOUNDS_PARSER.declareObject(constructorArg(), GEO_POINT_PARSER, BOTTOM_RIGHT_FIELD); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java index 59deb90c7e5a2..2ee56bf648dcc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentiles.java @@ -52,6 +52,6 @@ public double value(String name) { @Override public Iterable valueNames() { - return percentiles.keySet().stream().map(d -> d.toString()).toList(); + return percentiles.keySet().stream().map(Object::toString).toList(); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java index 0bf317c36be16..44ecf5cf69b4c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java @@ -31,7 +31,7 @@ public double value(String name) { @Override public Iterable valueNames() { - return percentiles.keySet().stream().map(d -> d.toString()).toList(); + return percentiles.keySet().stream().map(Object::toString).toList(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java index d1b0f03904ef9..3af30aa16f094 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java @@ -111,7 +111,7 @@ protected static void declarePercentilesFields(ObjectParser 0) { - double key = Double.valueOf(parser.currentName().substring(0, i)); + double key = Double.parseDouble(parser.currentName().substring(0, i)); aggregation.addPercentileAsString(key, parser.text()); } else { aggregation.addPercentile(Double.valueOf(parser.currentName()), Double.valueOf(parser.text())); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java index 
78001e3c65534..b5ab17ba335c3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentiles.java @@ -37,7 +37,7 @@ public double value(String name) { @Override public Iterable valueNames() { - return percentiles.keySet().stream().map(d -> d.toString()).toList(); + return percentiles.keySet().stream().map(Object::toString).toList(); } private static final ObjectParser PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java index 0596af8cbb51d..487cc2bd11bd3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java @@ -284,9 +284,4 @@ public boolean equals(Object obj) { && Objects.equals(reduceScript, other.reduceScript) && Objects.equals(params, other.params); } - - @Override - public boolean supportsParallelCollection() { - return false; - } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java index 5290aac3e055d..7e749b06442f6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java @@ -158,7 +158,6 @@ public void doClose() { private class State { private final ScriptedMetricAggContexts.MapScript.LeafFactory mapScript; - private final Map mapScriptParamsForState; private final Map combineScriptParamsForState; private final Map aggState; private MapScript leafMapScript; @@ -166,7 +165,7 @@ private class State { State() { // Its possible for building the initial state to mutate the parameters as a side effect Map aggParamsForState = ScriptedMetricAggregatorFactory.deepCopyParams(aggParams); - mapScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, mapScriptParams); + Map mapScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, mapScriptParams); combineScriptParamsForState = ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, combineScriptParams); aggState = newInitialState(ScriptedMetricAggregatorFactory.mergeParams(aggParamsForState, initScriptParams)); mapScript = mapScriptFactory.newFactory( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java index 02a8325abe7b8..7a5861eb97fe2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregator.java @@ -112,8 +112,7 @@ public boolean hasMetric(String name) { public double metric(String name, long owningBucketOrd) { if (owningBucketOrd >= counts.size()) { return switch (InternalStats.Metrics.resolve(name)) { - case count -> 0; - case sum -> 0; + case count, sum -> 0; case min -> Double.POSITIVE_INFINITY; case max -> Double.NEGATIVE_INFINITY; case avg -> Double.NaN; diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java index 71082d7abc29c..8328f25a5cab0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregator.java @@ -54,7 +54,7 @@ public double metric(String name, long bucketOrd) { if (state == null) { return Double.NaN; } else { - return InternalTDigestPercentileRanks.percentileRank(state, Double.valueOf(name)); + return InternalTDigestPercentileRanks.percentileRank(state, Double.parseDouble(name)); } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java index d80eb8a58040e..23c26794f6bb5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java @@ -77,9 +77,6 @@ public static TDigestState create(double compression, TDigestExecutionHint execu return switch (executionHint) { case HIGH_ACCURACY -> createOptimizedForAccuracy(compression); case DEFAULT -> create(compression); - default -> throw new IllegalArgumentException( - "Unexpected TDigestExecutionHint in TDigestState initialization: " + executionHint - ); }; } @@ -99,7 +96,6 @@ protected TDigestState(Type type, double compression) { case AVL_TREE -> TDigest.createAvlTreeDigest(compression); case SORTING -> TDigest.createSortingDigest(); case MERGING -> TDigest.createMergingDigest(compression); - default -> throw new IllegalArgumentException("Unexpected TDigestState type: " + type); }; this.type = type; this.compression = compression; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index d7113fc6ec798..00db45e2d06b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -272,9 +272,7 @@ public TopHitsAggregationBuilder sorts(List> sorts) { if (this.sorts == null) { this.sorts = new ArrayList<>(); } - for (SortBuilder sort : sorts) { - this.sorts.add(sort); - } + this.sorts.addAll(sorts); return this; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java index 55cd1efa40e0d..75f5c472c6665 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java @@ -191,8 +191,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE for (int i = 0; i < topDocs.scoreDocs.length; i++) { docIdsToLoad[i] = topDocs.scoreDocs[i].doc; } - subSearchContext.docIdsToLoad(docIdsToLoad); - subSearchContext.fetchPhase().execute(subSearchContext); + subSearchContext.fetchPhase().execute(subSearchContext, docIdsToLoad); FetchSearchResult fetchResult = subSearchContext.fetchResult(); if (fetchProfiles != null) { 
fetchProfiles.add(fetchResult.profileResult()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java index bef9b64c6e95b..c763ea5cf2bd3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucket.java @@ -126,7 +126,7 @@ public double value(String name) { @Override public Iterable valueNames() { - return Arrays.stream(percents).mapToObj(d -> String.valueOf(d)).toList(); + return Arrays.stream(percents).mapToObj(String::valueOf).toList(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java index c174dd5458685..c31acfcdd20f2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java @@ -37,7 +37,7 @@ public class MovAvgPipelineAggregationBuilder extends AbstractPipelineAggregatio public static final String MOVING_AVG_AGG_DEPRECATION_MSG = "Moving Average aggregation usage is not supported. " + "Use the [moving_fn] aggregation instead."; - public static ParseField NAME_V7 = new ParseField("moving_avg").withAllDeprecated(MOVING_AVG_AGG_DEPRECATION_MSG) + public static final ParseField NAME_V7 = new ParseField("moving_avg").withAllDeprecated(MOVING_AVG_AGG_DEPRECATION_MSG) .forRestApiVersion(RestApiVersion.equalTo(RestApiVersion.V_7)); public static final ContextParser PARSER = (parser, name) -> { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java index 0b982f8f2e586..53bf09329c57b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovingFunctions.java @@ -174,7 +174,7 @@ public static double holt(double[] values, double alpha, double beta) { int counter = 0; - Double last; + double last; for (double v : values) { if (Double.isNaN(v) == false) { last = v; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java index f9e037247bf2c..7da76d2d4c2eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedPercentilesBucket.java @@ -57,7 +57,7 @@ public double value(String name) { @Override public Iterable valueNames() { - return percentiles.keySet().stream().map(d -> d.toString()).toList(); + return percentiles.keySet().stream().map(Object::toString).toList(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java index 4c2f9a825c1fa..9bd27a9931bd0 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregator.java @@ -92,9 +92,9 @@ public String toString() { } } - private String name; - private String[] bucketsPaths; - private Map metadata; + private final String name; + private final String[] bucketsPaths; + private final Map metadata; protected PipelineAggregator(String name, String[] bucketsPaths, Map metadata) { this.name = name; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java index 500c107065520..7225d7652b3b8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregator.java @@ -28,9 +28,9 @@ import static org.elasticsearch.search.aggregations.pipeline.BucketHelpers.resolveBucketValue; public class SerialDiffPipelineAggregator extends PipelineAggregator { - private DocValueFormat formatter; - private GapPolicy gapPolicy; - private int lag; + private final DocValueFormat formatter; + private final GapPolicy gapPolicy; + private final int lag; SerialDiffPipelineAggregator( String name, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java index 35b8230a48554..24cceabf2388d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceType.java @@ -338,14 +338,10 @@ public Function roundingPreparer(AggregationContext @Override public QueryVisitor getSubVisitor(BooleanClause.Occur occur, Query parent) { // Only extract bounds queries that must filter the results - switch (occur) { - case MUST: - case FILTER: - return this; - - default: - return QueryVisitor.EMPTY_VISITOR; - } + return switch (occur) { + case MUST, FILTER -> this; + default -> QueryVisitor.EMPTY_VISITOR; + }; }; @Override @@ -450,5 +446,5 @@ public String typeName() { } /** List containing all members of the enumeration. */ - public static List ALL_CORE = Arrays.asList(CoreValuesSourceType.values()); + public static final List ALL_CORE = Arrays.asList(CoreValuesSourceType.values()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java index 101e94b6717c4..30db7c984db7a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java @@ -14,39 +14,24 @@ * Used by all field data based aggregators. This determine the context of the field data the aggregators are operating * in. It holds both the field names and the index field datas that are associated with them. 
*/ -public class FieldContext { - - private final String field; - private final IndexFieldData indexFieldData; - private final MappedFieldType fieldType; +public record FieldContext(String field, IndexFieldData indexFieldData, MappedFieldType fieldType) { /** * Constructs a field data context for the given field and its index field data * - * @param field The name of the field - * @param indexFieldData The index field data of the field + * @param field The name of the field + * @param indexFieldData The index field data of the field */ - public FieldContext(String field, IndexFieldData indexFieldData, MappedFieldType fieldType) { - this.field = field; - this.indexFieldData = indexFieldData; - this.fieldType = fieldType; - } - - public String field() { - return field; - } + public FieldContext {} /** * @return The index field datas in this context */ + @Override public IndexFieldData indexFieldData() { return indexFieldData; } - public MappedFieldType fieldType() { - return fieldType; - } - public String getTypeName() { return fieldType.typeName(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index 4472083060d6e..7e0c235ee4fb3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -96,7 +96,6 @@ protected MultiValuesSourceAggregationBuilder(StreamInput in) throws IOException /** * Read from a stream. */ - @SuppressWarnings("unchecked") private void read(StreamInput in) throws IOException { fields = in.readMap(MultiValuesSourceFieldConfig::new); userValueTypeHint = in.readOptionalWriteable(ValueType::readFromStream); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java index c1681a2070078..57ea138f63268 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/SamplingContext.java @@ -21,7 +21,7 @@ * This provides information around the current sampling context for aggregations */ public record SamplingContext(double probability, int seed) { - public static SamplingContext NONE = new SamplingContext(1.0, 0); + public static final SamplingContext NONE = new SamplingContext(1.0, 0); public boolean isSampled() { return probability < 1.0; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index bc83a5b5cd3b1..91bc2d12ac575 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -69,7 +69,7 @@ public ValuesSourceType getValuesSourceType() { return valuesSourceType; } - private static Set numericValueTypes = Set.of( + private static final Set numericValueTypes = Set.of( ValueType.DOUBLE, ValueType.DATE, ValueType.LONG, @@ -77,7 +77,7 @@ public ValuesSourceType getValuesSourceType() { ValueType.NUMERIC, ValueType.BOOLEAN ); - private static Set stringValueTypes = Set.of(ValueType.STRING, ValueType.IP); + private static final Set 
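
// FieldContext above collapses a fields-plus-getters class into a record; the empty
// compact constructor (public FieldContext {}) remains only as an anchor for the
// constructor javadoc, while the record header generates the fields and the
// field()/indexFieldData()/fieldType() accessors. A standalone sketch of the same
// conversion, with a hypothetical Ctx record rather than the real class:
record Ctx(String field, String typeName) {
    Ctx {
        // compact constructor: runs before the implicit field assignments,
        // which makes it a natural place for validation
        if (field == null || field.isBlank()) {
            throw new IllegalArgumentException("field is required");
        }
    }

    public static void main(String[] args) {
        Ctx ctx = new Ctx("price", "double");
        System.out.println(ctx.field() + " -> " + ctx.typeName()); // generated accessors
    }
}
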
stringValueTypes = Set.of(ValueType.STRING, ValueType.IP); /** * This is a bit of a hack to mirror the old {@link ValueType} behavior, which would allow a rough compatibility between types. This diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 85788c1964b40..af75a8495afba 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -182,7 +182,6 @@ private static ValuesSourceConfig internalResolve( aggregationScript, scriptValueType, missing, - timeZone, docValueFormat, context::nowInMillis ); @@ -258,14 +257,14 @@ private static DocValueFormat resolveFormat( public static ValuesSourceConfig resolveFieldOnly(MappedFieldType fieldType, AggregationContext context) { FieldContext fieldContext = context.buildFieldContext(fieldType); ValuesSourceType vstype = fieldContext.indexFieldData().getValuesSourceType(); - return new ValuesSourceConfig(vstype, fieldContext, false, null, null, null, null, null, context::nowInMillis); + return new ValuesSourceConfig(vstype, fieldContext, false, null, null, null, null, context::nowInMillis); } /** * Convenience method for creating unmapped configs */ public static ValuesSourceConfig resolveUnmapped(ValuesSourceType valuesSourceType, AggregationContext context) { - return new ValuesSourceConfig(valuesSourceType, null, true, null, null, null, null, null, context::nowInMillis); + return new ValuesSourceConfig(valuesSourceType, null, true, null, null, null, null, context::nowInMillis); } private final ValuesSourceType valuesSourceType; @@ -275,7 +274,6 @@ public static ValuesSourceConfig resolveUnmapped(ValuesSourceType valuesSourceTy private final boolean unmapped; private final DocValueFormat format; private final Object missing; - private final ZoneId timeZone; private final ValuesSource valuesSource; @SuppressWarnings("this-escape") @@ -286,7 +284,6 @@ public ValuesSourceConfig( AggregationScript.LeafFactory script, ValueType scriptValueType, Object missing, - ZoneId timeZone, DocValueFormat format, LongSupplier nowInMillis ) { @@ -299,7 +296,6 @@ public ValuesSourceConfig( this.script = script; this.scriptValueType = scriptValueType; this.missing = missing; - this.timeZone = timeZone; this.format = format == null ? 
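
// The ValueType hunk above is a constant-correctness fix: the two lookup sets become
// static final. Set.of(...) already yields an immutable set, so final makes the
// declaration match the runtime behavior. A small demonstration (NUMERIC is a
// hypothetical name, not the actual field in ValueType):
import java.util.Set;

class ImmutableConstants {
    private static final Set<String> NUMERIC = Set.of("long", "double", "date");

    public static void main(String[] args) {
        System.out.println(NUMERIC.contains("double")); // true
        try {
            NUMERIC.add("float"); // Set.of collections reject mutation
        } catch (UnsupportedOperationException e) {
            System.out.println("immutable, as the declaration now promises");
        }
    }
}
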
DocValueFormat.RAW : format; if (valid() == false) { @@ -383,10 +379,6 @@ public Object missing() { return this.missing; } - public ZoneId timezone() { - return this.timeZone; - } - public DocValueFormat format() { return format; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java index c33ad5266d4e2..44e66d98f0258 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceRegistry.java @@ -53,12 +53,9 @@ public int hashCode() { } } - @SuppressWarnings("rawtypes") - public static final RegistryKey UNREGISTERED_KEY = new RegistryKey<>("unregistered", RegistryKey.class); - public static class Builder { private final AggregationUsageService.Builder usageServiceBuilder; - private Map, List>> aggregatorRegistry = new HashMap<>(); + private final Map, List>> aggregatorRegistry = new HashMap<>(); public Builder() { this.usageServiceBuilder = new AggregationUsageService.Builder(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java index 0e122162e5e87..32f84612fb887 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java @@ -81,7 +81,7 @@ private static double toDoubleValue(Object o) { // that scripts return the same internal representation as regular fields, so boolean // values in scripts need to be converted to a number, and the value formatter will // make sure of using true/false in the key_as_string field - return ((Boolean) o).booleanValue() ? 1.0 : 0.0; + return (Boolean) o ? 1.0 : 0.0; } else { throw AggregationErrors.unsupportedScriptValue(o == null ? "null" : o.toString()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java index f702be71c49f3..66a8513e7c118 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java @@ -83,7 +83,7 @@ private static long toLongValue(Object o) { // that scripts return the same internal representation as regular fields, so boolean // values in scripts need to be converted to a number, and the value formatter will // make sure of using true/false in the key_as_string field - return ((Boolean) o).booleanValue() ? 1L : 0L; + return (Boolean) o ? 1L : 0L; } else { throw AggregationErrors.unsupportedScriptValue(o == null ? 
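
// ((Boolean) o).booleanValue() in the two Script*Values hunks nearby becomes a plain
// (Boolean) o ? ... : ... condition; auto-unboxing compiles to the same booleanValue()
// call, and both forms still throw NullPointerException on null, which is why the null
// case is routed to AggregationErrors first. A sketch of the equivalence:
class UnboxingSketch {
    static long toLongValue(Object o) {
        if (o instanceof Boolean) {
            return (Boolean) o ? 1L : 0L; // auto-unboxing supplies the booleanValue() call
        }
        throw new IllegalArgumentException("unsupported: " + (o == null ? "null" : o.toString()));
    }

    public static void main(String[] args) {
        System.out.println(toLongValue(Boolean.TRUE));  // 1
        System.out.println(toLongValue(Boolean.FALSE)); // 0
    }
}
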
"null" : o.toString()); } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 40d46a71405dd..069aa6ff41ae1 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -63,6 +63,7 @@ import java.util.Map; import java.util.Objects; import java.util.function.Consumer; +import java.util.function.ToLongFunction; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; @@ -2103,7 +2104,7 @@ public String toString(Params params) { } } - public boolean supportsParallelCollection() { + public boolean supportsParallelCollection(ToLongFunction fieldCardinality) { if (profile) return false; if (sorts != null) { @@ -2113,6 +2114,6 @@ public boolean supportsParallelCollection() { } } - return collapse == null && (aggregations == null || aggregations.supportsParallelCollection()); + return collapse == null && (aggregations == null || aggregations.supportsParallelCollection(fieldCardinality)); } } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java index 049e06b0d98c7..f787e30644658 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java @@ -204,6 +204,6 @@ public CollapseContext build(SearchExecutionContext searchExecutionContext) { ); } - return new CollapseContext(field, fieldType, innerHits); + return new CollapseContext(field, fieldType); } } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java index 62d7f7cc74cd4..080caaeed0fde 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java @@ -11,23 +11,18 @@ import org.apache.lucene.search.Sort; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType.CollapseType; -import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.lucene.grouping.SinglePassGroupingCollector; -import java.util.List; - /** * Context used for field collapsing */ public class CollapseContext { private final String fieldName; private final MappedFieldType fieldType; - private final List innerHits; - public CollapseContext(String fieldName, MappedFieldType fieldType, List innerHits) { + public CollapseContext(String fieldName, MappedFieldType fieldType) { this.fieldName = fieldName; this.fieldType = fieldType; - this.innerHits = innerHits; } /** @@ -42,11 +37,6 @@ public MappedFieldType getFieldType() { return fieldType; } - /** The inner hit options to expand the collapsed results **/ - public List getInnerHit() { - return innerHits; - } - public SinglePassGroupingCollector createTopDocs(Sort sort, int topN, FieldDoc after) { if (fieldType.collapseType() == CollapseType.KEYWORD) { return SinglePassGroupingCollector.createKeyword(fieldName, fieldType, sort, topN, after); diff --git a/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java b/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java index 
1bd70b5c14817..0ce6824ec432b 100644
--- a/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java
+++ b/server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java
@@ -21,9 +21,9 @@
 
 public class AggregatedDfs implements Writeable {
 
-    private Map<Term, TermStatistics> termStatistics;
-    private Map<String, CollectionStatistics> fieldStatistics;
-    private long maxDoc;
+    private final Map<Term, TermStatistics> termStatistics;
+    private final Map<String, CollectionStatistics> fieldStatistics;
+    private final long maxDoc;
 
     public AggregatedDfs(StreamInput in) throws IOException {
         int size = in.readVInt();
@@ -51,10 +51,6 @@ public Map<String, CollectionStatistics> fieldStatistics() {
         return fieldStatistics;
     }
 
-    public long maxDoc() {
-        return maxDoc;
-    }
-
     @Override
     public void writeTo(final StreamOutput out) throws IOException {
         out.writeMap(termStatistics, (o, k) -> {
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
index 5a04404c2e38a..5c98808c9c169 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -56,7 +56,7 @@ public FetchPhase(List<FetchSubPhase> fetchSubPhases) {
         this.fetchSubPhases[fetchSubPhases.size()] = new InnerHitsPhase(this);
     }
 
-    public void execute(SearchContext context) {
+    public void execute(SearchContext context, int[] docIdsToLoad) {
         if (LOGGER.isTraceEnabled()) {
             LOGGER.trace("{}", new SearchContextSourcePrinter(context));
         }
@@ -65,7 +65,7 @@ public void execute(SearchContext context) {
             throw new TaskCancelledException("cancelled");
         }
 
-        if (context.docIdsToLoad() == null || context.docIdsToLoad().length == 0) {
+        if (docIdsToLoad == null || docIdsToLoad.length == 0) {
             // no individual hits to process, so we shortcut
             SearchHits hits = new SearchHits(new SearchHit[0], context.queryResult().getTotalHits(), context.queryResult().getMaxScore());
             context.fetchResult().shardResult(hits, null);
@@ -75,7 +75,7 @@
         Profiler profiler = context.getProfilers() == null ?
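
// FetchPhase.execute above wraps buildSearchHits in try/finally so the profiler is
// finished whether fetching succeeds or throws. The same shape, reduced to a runnable
// sketch; Profiler here is a one-method stand-in, not the Elasticsearch profiler:
class ProfiledCallSketch {
    interface Profiler { void finish(); }

    static String run(boolean fail) {
        Profiler profiler = () -> System.out.println("profile finished");
        try {
            if (fail) {
                throw new IllegalStateException("fetch failed");
            }
            return "hits";
        } finally {
            profiler.finish(); // runs on success and on failure alike
        }
    }

    public static void main(String[] args) {
        System.out.println(run(false));
        try {
            run(true);
        } catch (IllegalStateException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}
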
Profiler.NOOP : Profilers.startProfilingFetchPhase(); SearchHits hits = null; try { - hits = buildSearchHits(context, profiler); + hits = buildSearchHits(context, docIdsToLoad, profiler); } finally { // Always finish profiling ProfileResult profileResult = profiler.finish(); @@ -91,12 +91,12 @@ private static class PreloadedSourceProvider implements SourceProvider { Source source; @Override - public Source getSource(LeafReaderContext ctx, int doc) throws IOException { + public Source getSource(LeafReaderContext ctx, int doc) { return source; } } - private SearchHits buildSearchHits(SearchContext context, Profiler profiler) { + private SearchHits buildSearchHits(SearchContext context, int[] docIdsToLoad, Profiler profiler) { FetchContext fetchContext = new FetchContext(context); SourceLoader sourceLoader = context.newSourceLoader(); @@ -166,7 +166,7 @@ protected SearchHit nextDoc(int doc) throws IOException { } }; - SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), context.docIdsToLoad()); + SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), docIdsToLoad); if (context.isCancelled()) { throw new TaskCancelledException("cancelled"); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index c25c3575a8c4b..725b723b5155f 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -11,12 +11,15 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.profile.ProfileResult; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; @@ -28,6 +31,8 @@ public final class FetchSearchResult extends SearchPhaseResult { private ProfileResult profileResult; + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> hits = null)); + public FetchSearchResult() {} public FetchSearchResult(ShardSearchContextId id, SearchShardTarget shardTarget) { @@ -90,4 +95,24 @@ public int counterGetAndIncrement() { public ProfileResult profileResult() { return profileResult; } + + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java index 78d6882472ebd..bb838c29ff54c 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java @@ -16,6 +16,7 @@ import org.elasticsearch.search.SearchShardTarget; import 
org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; @@ -25,27 +26,25 @@ public final class QueryFetchSearchResult extends SearchPhaseResult { private final FetchSearchResult fetchResult; private final RefCounted refCounted; + public static QueryFetchSearchResult of(QuerySearchResult queryResult, FetchSearchResult fetchResult) { + // We're acquiring a copy, we should incRef it + queryResult.incRef(); + fetchResult.incRef(); + return new QueryFetchSearchResult(queryResult, fetchResult); + } + public QueryFetchSearchResult(StreamInput in) throws IOException { - super(in); // These get a ref count of 1 when we create them, so we don't need to incRef here - queryResult = new QuerySearchResult(in); - fetchResult = new FetchSearchResult(in); - refCounted = AbstractRefCounted.of(() -> { - queryResult.decRef(); - fetchResult.decRef(); - }); + this(new QuerySearchResult(in), new FetchSearchResult(in)); } - public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) { + private QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) { this.queryResult = queryResult; this.fetchResult = fetchResult; - // We're acquiring a copy, we should incRef it - this.queryResult.incRef(); - this.fetchResult.incRef(); - refCounted = AbstractRefCounted.of(() -> { + refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> { queryResult.decRef(); fetchResult.decRef(); - }); + })); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java index 9ce93a825f849..86f6db0b681d7 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchRequest.java @@ -37,7 +37,8 @@ public class ShardFetchRequest extends TransportRequest { private final int[] docIds; - private ScoreDoc lastEmittedDoc; + @Nullable + private final ScoreDoc lastEmittedDoc; public ShardFetchRequest(ShardSearchContextId contextId, List docIds, ScoreDoc lastEmittedDoc) { this.contextId = contextId; @@ -60,6 +61,8 @@ public ShardFetchRequest(StreamInput in) throws IOException { lastEmittedDoc = Lucene.readScoreDoc(in); } else if (flag != 0) { throw new IOException("Unknown flag: " + flag); + } else { + lastEmittedDoc = null; } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java index ae0e52ab69091..c3a91fde896bd 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsContext.java @@ -115,7 +115,7 @@ public boolean equals(Object o) { StoredFieldsContext that = (StoredFieldsContext) o; if (fetchFields != that.fetchFields) return false; - return fieldNames != null ? 
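
// The StoredFieldsContext.equals change just below swaps the hand-rolled
// `x != null ? x.equals(y) : y == null` dance for Objects.equals(x, y), which encodes
// exactly that null table:
import java.util.Objects;

class NullSafeEqualsSketch {
    public static void main(String[] args) {
        System.out.println(Objects.equals(null, null)); // true
        System.out.println(Objects.equals("a", null));  // false
        System.out.println(Objects.equals(null, "a"));  // false
        System.out.println(Objects.equals("a", "a"));   // true
    }
}
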
fieldNames.equals(that.fieldNames) : that.fieldNames == null; + return Objects.equals(fieldNames, that.fieldNames); } @@ -164,7 +164,7 @@ public static StoredFieldsContext fromXContent(String fieldName, XContentParser return fromList(Collections.singletonList(parser.text())); } else if (token == XContentParser.Token.START_ARRAY) { ArrayList list = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { list.add(parser.text()); } return fromList(list); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java index 45054a90c749f..87cbf9b1d6b85 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/StoredFieldsSpec.java @@ -27,17 +27,20 @@ public boolean noRequirements() { /** * Use when no stored fields are required */ - public static StoredFieldsSpec NO_REQUIREMENTS = new StoredFieldsSpec(false, false, Set.of()); + public static final StoredFieldsSpec NO_REQUIREMENTS = new StoredFieldsSpec(false, false, Set.of()); /** * Use when the source should be loaded but no other stored fields are required */ - public static StoredFieldsSpec NEEDS_SOURCE = new StoredFieldsSpec(true, false, Set.of()); + public static final StoredFieldsSpec NEEDS_SOURCE = new StoredFieldsSpec(true, false, Set.of()); /** * Combine these stored field requirements with those from another StoredFieldsSpec */ public StoredFieldsSpec merge(StoredFieldsSpec other) { + if (this == other) { + return this; + } Set mergedFields = new HashSet<>(this.requiredStoredFields); mergedFields.addAll(other.requiredStoredFields); return new StoredFieldsSpec( diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java index bba614dce78a5..4587d7560b2d9 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java @@ -26,8 +26,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Map; -import java.util.function.Function; /** * Context used to fetch the {@code _source}. @@ -42,7 +40,6 @@ public class FetchSourceContext implements Writeable, ToXContentObject { private final boolean fetchSource; private final String[] includes; private final String[] excludes; - private Function, Map> filter; public static FetchSourceContext of(boolean fetchSource) { return fetchSource ? 
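
// StoredFieldsSpec.merge above gains an identity fast path: merging a spec with itself
// now returns this instead of allocating a copy, which is safe because the type is
// immutable. The idiom in isolation; Spec is a hypothetical stand-in:
import java.util.HashSet;
import java.util.Set;

record Spec(Set<String> requiredStoredFields) {
    Spec merge(Spec other) {
        if (this == other) {
            return this; // skip the copy below when both sides are the same object
        }
        Set<String> merged = new HashSet<>(requiredStoredFields);
        merged.addAll(other.requiredStoredFields());
        return new Spec(merged);
    }

    public static void main(String[] args) {
        Spec a = new Spec(Set.of("title"));
        System.out.println(a.merge(a) == a);                   // true: no allocation
        System.out.println(a.merge(new Spec(Set.of("body")))); // merged copy
    }
}
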
FETCH_SOURCE : DO_NOT_FETCH_SOURCE; @@ -153,33 +150,9 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { if (INCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - List includesList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - includesList.add(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + currentFieldName + "].", - parser.getTokenLocation() - ); - } - } - includes = includesList.toArray(Strings.EMPTY_ARRAY); + includes = parseStringArray(parser, currentFieldName); } else if (EXCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - List excludesList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - excludesList.add(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + currentFieldName + "].", - parser.getTokenLocation() - ); - } - } - excludes = excludesList.toArray(Strings.EMPTY_ARRAY); + excludes = parseStringArray(parser, currentFieldName); } else { throw new ParsingException( parser.getTokenLocation(), @@ -227,6 +200,25 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx return FetchSourceContext.of(fetchSource, includes, excludes); } + private static String[] parseStringArray(XContentParser parser, String currentFieldName) throws IOException { + XContentParser.Token token; + String[] excludes; + List excludesList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + excludesList.add(parser.text()); + } else { + throw new ParsingException( + parser.getTokenLocation(), + "Unknown key for a " + token + " in [" + currentFieldName + "].", + parser.getTokenLocation() + ); + } + } + excludes = excludesList.toArray(Strings.EMPTY_ARRAY); + return excludes; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (fetchSource) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java index 44e9a2a6e5193..feb0547a32536 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java @@ -89,11 +89,10 @@ private void hitExecute(Map innerHi for (int j = 0; j < topDoc.topDocs.scoreDocs.length; j++) { docIdsToLoad[j] = topDoc.topDocs.scoreDocs[j].doc; } - innerHitsContext.docIdsToLoad(docIdsToLoad); innerHitsContext.setRootId(hit.getId()); innerHitsContext.setRootLookup(rootSource); - fetchPhase.execute(innerHitsContext); + fetchPhase.execute(innerHitsContext, docIdsToLoad); FetchSearchResult fetchResult = innerHitsContext.fetchResult(); SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); for (int j = 0; j < internalHits.length; j++) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java 
b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index 3207f1ffa99f0..36cda88a063ec 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -451,13 +451,6 @@ public HB boundaryScannerLocale(String boundaryScannerLocale) { return (HB) this; } - /** - * @return the value set by {@link #boundaryScannerLocale(String)} - */ - public Locale boundaryScannerLocale() { - return this.boundaryScannerLocale; - } - /** * Allows to set custom options for custom highlighters. */ diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java index 31e704fe30ff9..cae353bb91014 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.QueryScorer; import org.apache.lucene.search.highlight.WeightedSpanTerm; @@ -22,18 +21,6 @@ public final class CustomQueryScorer extends QueryScorer { - public CustomQueryScorer(Query query, IndexReader reader, String field, String defaultField) { - super(query, reader, field, defaultField); - } - - public CustomQueryScorer(Query query, IndexReader reader, String field) { - super(query, reader, field); - } - - public CustomQueryScorer(Query query, String field, String defaultField) { - super(query, field, defaultField); - } - public CustomQueryScorer(Query query, String field) { super(query, field); } @@ -42,10 +29,6 @@ public CustomQueryScorer(Query query) { super(query); } - public CustomQueryScorer(WeightedSpanTerm[] weightedTerms) { - super(weightedTerms); - } - @Override protected WeightedSpanTermExtractor newTermExtractor(String defaultField) { return defaultField == null ? 
new CustomWeightedSpanTermExtractor() : new CustomWeightedSpanTermExtractor(defaultField); @@ -69,7 +52,6 @@ protected void extractUnknownQuery(Query query, Map te protected void extract(Query query, float boost, Map terms) throws IOException { if (isChildOrParentQuery(query.getClass())) { // skip has_child or has_parent queries, see: https://github.com/elastic/elasticsearch/issues/14999 - return; } else if (query instanceof FunctionScoreQuery) { super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms); } else if (query instanceof ESToParentBlockJoinQuery) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java index d90aba24a94df..e77436ba61423 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java @@ -32,7 +32,6 @@ import org.elasticsearch.lucene.search.uhighlight.Snippet; import org.elasticsearch.search.fetch.FetchContext; import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.fetch.FetchSubPhase.HitContext; import java.io.IOException; import java.text.BreakIterator; @@ -120,7 +119,7 @@ CustomUnifiedHighlighter buildHighlighter(FieldHighlightContext fieldContext) { fieldContext.context.getSearchExecutionContext().getIndexAnalyzer(f -> Lucene.KEYWORD_ANALYZER), queryMaxAnalyzedOffset ); - PassageFormatter passageFormatter = getPassageFormatter(fieldContext.hitContext, fieldContext.field, encoder); + PassageFormatter passageFormatter = getPassageFormatter(fieldContext.field, encoder); IndexSearcher searcher = fieldContext.context.searcher(); OffsetSource offsetSource = getOffsetSource(fieldContext.context, fieldContext.fieldType); BreakIterator breakIterator; @@ -161,7 +160,7 @@ CustomUnifiedHighlighter buildHighlighter(FieldHighlightContext fieldContext) { ); } - protected PassageFormatter getPassageFormatter(HitContext hitContext, SearchHighlightContext.Field field, Encoder encoder) { + protected PassageFormatter getPassageFormatter(SearchHighlightContext.Field field, Encoder encoder) { return new CustomPassageFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index 75a1777ae7d8f..8417c9d747981 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -312,6 +312,6 @@ private static class FieldHighlightEntry { private static class HighlighterEntry { public org.apache.lucene.search.vectorhighlight.FastVectorHighlighter fvh; - public Map fields = new HashMap<>(); + public final Map fields = new HashMap<>(); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java index 9c761936863d6..5421cd59a23e4 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java +++ 
b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java @@ -8,15 +8,11 @@ package org.elasticsearch.search.fetch.subphase.highlight; -import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter; import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo; import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo.SubInfo; import org.apache.lucene.search.vectorhighlight.FragmentsBuilder; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.index.analysis.AnalyzerComponentsProvider; -import org.elasticsearch.index.analysis.NamedAnalyzer; -import org.elasticsearch.index.analysis.TokenFilterFactory; import java.util.List; @@ -45,7 +41,7 @@ public static WeightedFragInfo fixWeightedFragInfo(WeightedFragInfo fragInfo) { CollectionUtil.introSort(subInfos, (o1, o2) -> { int startOffset = o1.getTermsOffsets().get(0).getStartOffset(); int startOffset2 = o2.getTermsOffsets().get(0).getStartOffset(); - return compare(startOffset, startOffset2); + return Integer.compare(startOffset, startOffset2); }); return new WeightedFragInfo( Math.min(fragInfo.getSubInfos().get(0).getTermsOffsets().get(0).getStartOffset(), fragInfo.getStartOffset()), @@ -58,23 +54,4 @@ public static WeightedFragInfo fixWeightedFragInfo(WeightedFragInfo fragInfo) { } } - private static int compare(int x, int y) { - return (x < y) ? -1 : ((x == y) ? 0 : 1); - } - - private static boolean containsBrokenAnalysis(Analyzer analyzer) { - // TODO maybe we need a getter on Namedanalyzer that tells if this uses broken Analysis - if (analyzer instanceof NamedAnalyzer) { - analyzer = ((NamedAnalyzer) analyzer).analyzer(); - } - if (analyzer instanceof AnalyzerComponentsProvider) { - final TokenFilterFactory[] tokenFilters = ((AnalyzerComponentsProvider) analyzer).getComponents().getTokenFilters(); - for (TokenFilterFactory tokenFilterFactory : tokenFilters) { - if (tokenFilterFactory.breaksFastVectorHighlighter()) { - return true; - } - } - } - return false; - } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java index 7d371ac372774..0042b1eafba71 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -45,8 +45,6 @@ * @see org.elasticsearch.search.builder.SearchSourceBuilder#highlight() */ public final class HighlightBuilder extends AbstractHighlighterBuilder { - /** default for whether to highlight fields based on the source even if stored separately */ - public static final boolean DEFAULT_FORCE_SOURCE = false; /** default for whether a field should be highlighted only if a query matches that field */ public static final boolean DEFAULT_REQUIRE_FIELD_MATCH = true; /** default for whether to stop highlighting at the defined max_analyzed_offset to avoid exceptions for longer texts */ @@ -149,17 +147,6 @@ public HighlightBuilder field(String name) { return field(new Field(name)); } - /** - * Adds a field to be highlighted with a provided fragment size (in characters), and - * default number of fragments of 5. 
- * - * @param name The field to highlight - * @param fragmentSize The size of a fragment in characters - */ - public HighlightBuilder field(String name, int fragmentSize) { - return field(new Field(name).fragmentSize(fragmentSize)); - } - /** * Adds a field to be highlighted with a provided fragment size (in characters), and * a provided (maximum) number of fragments. diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java index d4b5234f4e0b2..6bc9f65ac655f 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java @@ -30,23 +30,12 @@ */ public class HighlightField implements ToXContentFragment, Writeable { - private String name; + private final String name; - private Text[] fragments; + private final Text[] fragments; public HighlightField(StreamInput in) throws IOException { - name = in.readString(); - if (in.readBoolean()) { - int size = in.readVInt(); - if (size == 0) { - fragments = Text.EMPTY_ARRAY; - } else { - fragments = new Text[size]; - for (int i = 0; i < size; i++) { - fragments[i] = in.readText(); - } - } - } + this(in.readString(), in.readOptionalArray(StreamInput::readText, Text[]::new)); } public HighlightField(String name, Text[] fragments) { @@ -61,13 +50,6 @@ public String name() { return name; } - /** - * The name of the field highlighted. - */ - public String getName() { - return name(); - } - /** * The highlighted fragments. {@code null} if failed to highlight (for example, the field is not stored). */ @@ -75,13 +57,6 @@ public Text[] fragments() { return fragments; } - /** - * The highlighted fragments. {@code null} if failed to highlight (for example, the field is not stored). 
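
// The HighlightField(StreamInput) hunk above folds a manual "present flag, then count,
// then elements" loop into a single this(...) delegation to readOptionalArray. A
// runnable model of that null-or-array decoding, using DataInputStream in place of
// StreamInput; the byte layout here is illustrative, not the actual transport format:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class OptionalArraySketch {
    static String[] readOptionalArray(DataInputStream in) throws IOException {
        if (in.readBoolean() == false) {
            return null; // absent array: nothing else on the wire
        }
        String[] values = new String[in.readInt()];
        for (int i = 0; i < values.length; i++) {
            values[i] = in.readUTF();
        }
        return values;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeBoolean(true);
        out.writeInt(2);
        out.writeUTF("<em>foo</em>");
        out.writeUTF("bar");
        String[] fragments = readOptionalArray(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(fragments.length + " fragments, first = " + fragments[0]);
    }
}
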
- */ - public Text[] getFragments() { - return fragments(); - } - @Override public String toString() { return "[" + name + "], fragments[" + Arrays.toString(fragments) + "]"; @@ -101,14 +76,14 @@ public void writeTo(StreamOutput out) throws IOException { public static HighlightField fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); String fieldName = parser.currentName(); - Text[] fragments = null; + Text[] fragments; XContentParser.Token token = parser.nextToken(); if (token == XContentParser.Token.START_ARRAY) { List values = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { values.add(new Text(parser.text())); } - fragments = values.toArray(new Text[values.size()]); + fragments = values.toArray(Text.EMPTY_ARRAY); } else if (token == XContentParser.Token.VALUE_NULL) { fragments = null; } else { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java index f1bb3f2c773ac..79c7198564be5 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceScoreOrderFragmentsBuilder.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; import org.apache.lucene.document.Field; -import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.highlight.Encoder; import org.apache.lucene.search.vectorhighlight.BoundaryScanner; @@ -20,8 +19,6 @@ import org.elasticsearch.search.lookup.Source; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder { @@ -51,19 +48,7 @@ public SourceScoreOrderFragmentsBuilder( @Override protected Field[] getFields(IndexReader reader, int docId, String fieldName) throws IOException { // we know its low level reader, and matching docId, since that's how we call the highlighter with - List values = valueFetcher.fetchValues(source, docId, new ArrayList<>()); - if (values.size() > 1 && fetchContext.sourceLoader().reordersFieldValues()) { - throw new IllegalArgumentException( - "The fast vector highlighter doesn't support loading multi-valued fields from _source in index [" - + fetchContext.getIndexName() - + "] because _source can reorder field values" - ); - } - Field[] fields = new Field[values.size()]; - for (int i = 0; i < values.size(); i++) { - fields[i] = new Field(fieldType.name(), values.get(i).toString(), TextField.TYPE_NOT_STORED); - } - return fields; + return SourceSimpleFragmentsBuilder.doGetFields(docId, valueFetcher, source, fetchContext, fieldType); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java index 0a7a5d300339b..c6b69717b8f75 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/SourceSimpleFragmentsBuilder.java @@ -46,6 +46,11 @@ public SourceSimpleFragmentsBuilder( @Override protected Field[] 
getFields(IndexReader reader, int docId, String fieldName) throws IOException { // we know its low level reader, and matching docId, since that's how we call the highlighter with + return doGetFields(docId, valueFetcher, source, fetchContext, fieldType); + } + + static Field[] doGetFields(int docId, ValueFetcher valueFetcher, Source source, FetchContext fetchContext, MappedFieldType fieldType) + throws IOException { List values = valueFetcher.fetchValues(source, docId, new ArrayList<>()); if (values.isEmpty()) { return EMPTY_FIELDS; @@ -63,5 +68,4 @@ protected Field[] getFields(IndexReader reader, int docId, String fieldName) thr } return fields; } - } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 3c69db98c7588..b7c77e4968854 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -265,7 +265,7 @@ private static LeafSlice[] computeSlices(List leaves, int min List sortedLeaves = new ArrayList<>(leaves); // Sort by maxDoc, descending: final Comparator leafComparator = Comparator.comparingInt(l -> l.reader().maxDoc()); - Collections.sort(sortedLeaves, leafComparator.reversed()); + sortedLeaves.sort(leafComparator.reversed()); // we add the groups on a priority queue, so we can add orphan leafs to the smallest group final Comparator> groupComparator = Comparator.comparingInt( l -> l.stream().mapToInt(lr -> lr.reader().maxDoc()).sum() diff --git a/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java b/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java index 794e429bbc473..ecb7833558a6b 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java @@ -142,28 +142,7 @@ public void searchNearestVectors(String field, byte[] target, KnnCollector colle in.searchNearestVectors(field, target, collector, acceptDocs); return; } - // when acceptDocs is null due to no doc deleted, we will instantiate a new one that would - // match all docs to allow timeout checking. - final Bits updatedAcceptDocs = acceptDocs == null ? new Bits.MatchAllBits(maxDoc()) : acceptDocs; - Bits timeoutCheckingAcceptDocs = new Bits() { - private static final int MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK = 10; - private int calls; - - @Override - public boolean get(int index) { - if (calls++ % MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK == 0) { - queryCancellation.checkCancelled(); - } - - return updatedAcceptDocs.get(index); - } - - @Override - public int length() { - return updatedAcceptDocs.length(); - } - }; - in.searchNearestVectors(field, target, collector, timeoutCheckingAcceptDocs); + in.searchNearestVectors(field, target, collector, new TimeOutCheckingBits(acceptDocs)); } @Override @@ -181,29 +160,32 @@ public void searchNearestVectors(String field, float[] target, KnnCollector coll in.searchNearestVectors(field, target, collector, acceptDocs); return; } - // when acceptDocs is null due to no doc deleted, we will instantiate a new one that would - // match all docs to allow timeout checking. - final Bits updatedAcceptDocs = acceptDocs == null ? 
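
// The ExitableDirectoryReader hunk above replaces two identical anonymous Bits
// wrappers (one per searchNearestVectors overload) with a single named inner class,
// and in doing so fixes the float overload, which previously passed the unwrapped
// acceptDocs back in. The wrapper polls for cancellation once every
// MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK accesses. A reduced model; Bits and the
// cancellation hook are stand-ins for the Lucene/Elasticsearch types:
class TimeoutBitsSketch {
    interface Bits { boolean get(int index); }

    static Bits timeoutChecking(Bits acceptDocs, Runnable checkCancelled) {
        return new Bits() {
            private static final int CHECK_EVERY = 10;
            private int calls;

            @Override
            public boolean get(int index) {
                if (calls++ % CHECK_EVERY == 0) {
                    checkCancelled.run(); // would throw if the query has timed out
                }
                return acceptDocs.get(index);
            }
        };
    }

    public static void main(String[] args) {
        Bits matchAll = index -> true; // models Bits.MatchAllBits for the null case
        Bits checked = timeoutChecking(matchAll, () -> { /* deadline check would go here */ });
        System.out.println(checked.get(0) && checked.get(1));
    }
}
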
new Bits.MatchAllBits(maxDoc()) : acceptDocs; - Bits timeoutCheckingAcceptDocs = new Bits() { - private static final int MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK = 10; - private int calls; - - @Override - public boolean get(int index) { - if (calls++ % MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK == 0) { - queryCancellation.checkCancelled(); - } - - return updatedAcceptDocs.get(index); - } + in.searchNearestVectors(field, target, collector, new TimeOutCheckingBits(acceptDocs)); + } + + private class TimeOutCheckingBits implements Bits { + private static final int MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK = 10; + private final Bits updatedAcceptDocs; + private int calls; - @Override - public int length() { - return updatedAcceptDocs.length(); + TimeOutCheckingBits(Bits acceptDocs) { + // when acceptDocs is null due to no doc deleted, we will instantiate a new one that would + // match all docs to allow timeout checking. + this.updatedAcceptDocs = acceptDocs == null ? new Bits.MatchAllBits(maxDoc()) : acceptDocs; + } + + @Override + public boolean get(int index) { + if (calls++ % MAX_CALLS_BEFORE_QUERY_TIMEOUT_CHECK == 0) { + queryCancellation.checkCancelled(); } - }; + return updatedAcceptDocs.get(index); + } - in.searchNearestVectors(field, target, collector, acceptDocs); + @Override + public int length() { + return updatedAcceptDocs.length(); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java b/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java index 5dc0374b73fc6..07fa169642dbf 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java @@ -251,8 +251,7 @@ public void searchNearestVectors(String field, float[] target, KnnCollector coll @Override public String toString() { - final StringBuilder sb = new StringBuilder("FieldUsageTrackingLeafReader(reader="); - return sb.append(in).append(')').toString(); + return "FieldUsageTrackingLeafReader(reader=" + in + ')'; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 67a265127026d..c02a959231a61 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -174,11 +174,6 @@ public boolean sourceRequested() { return in.sourceRequested(); } - @Override - public boolean hasFetchSourceContext() { - return in.hasFetchSourceContext(); - } - @Override public FetchSourceContext fetchSourceContext() { return in.fetchSourceContext(); @@ -364,16 +359,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { in.seqNoAndPrimaryTerm(seqNoAndPrimaryTerm); } - @Override - public int[] docIdsToLoad() { - return in.docIdsToLoad(); - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - return in.docIdsToLoad(docIdsToLoad); - } - @Override public DfsSearchResult dfsResult() { return in.dfsResult(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java index b8886c3e79a8c..911b647067e63 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java +++ 
b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java @@ -54,11 +54,6 @@ public Scroll scroll() { return scroll; } - public InternalScrollSearchRequest scroll(Scroll scroll) { - this.scroll = scroll; - return this; - } - @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 274dc233ff5c7..512df4d15dcb0 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -44,6 +45,7 @@ import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; import java.util.HashMap; @@ -66,7 +68,14 @@ public abstract class SearchContext implements Releasable { public static final int DEFAULT_TRACK_TOTAL_HITS_UP_TO = 10000; protected final List releasables = new CopyOnWriteArrayList<>(); + private final AtomicBoolean closed = new AtomicBoolean(false); + + { + if (Assertions.ENABLED) { + releasables.add(LeakTracker.wrap(() -> { assert closed.get(); })); + } + } private InnerHitsContext innerHitsContext; private Query rewriteQuery; @@ -182,8 +191,6 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { */ public abstract boolean sourceRequested(); - public abstract boolean hasFetchSourceContext(); - public abstract FetchSourceContext fetchSourceContext(); public abstract SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext); @@ -315,10 +322,6 @@ public Query rewrittenQuery() { /** controls whether the sequence number and primary term of the last modification to each hit should be returned */ public abstract void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm); - public abstract int[] docIdsToLoad(); - - public abstract SearchContext docIdsToLoad(int[] docIdsToLoad); - public abstract DfsSearchResult dfsResult(); /** diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index fbfcfdf9500ed..fe9cfdc87695e 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -240,7 +240,7 @@ public ShardSearchRequest( this.originalIndices = originalIndices; this.readerId = readerId; this.keepAlive = keepAlive; - assert keepAlive == null || readerId != null : "readerId: " + readerId + " keepAlive: " + keepAlive; + assert keepAlive == null || readerId != null : "readerId: null keepAlive: " + keepAlive; this.channelVersion = TransportVersion.current(); this.waitForCheckpoint = waitForCheckpoint; this.waitForCheckpointsTimeout = waitForCheckpointsTimeout; @@ -334,7 +334,7 @@ public 
ShardSearchRequest(StreamInput in) throws IOException { readerId = null; keepAlive = null; } - assert keepAlive == null || readerId != null : "readerId: " + readerId + " keepAlive: " + keepAlive; + assert keepAlive == null || readerId != null : "readerId: null keepAlive: " + keepAlive; channelVersion = TransportVersion.min(TransportVersion.readVersion(in), in.getTransportVersion()); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { waitForCheckpoint = in.readLong(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index f1fd984aec5ba..8567677aca30a 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -41,8 +41,6 @@ public class SubSearchContext extends FilteredSearchContext { private final FetchSearchResult fetchSearchResult; private final QuerySearchResult querySearchResult; - private int[] docIdsToLoad; - private StoredFieldsContext storedFields; private ScriptFieldsContext scriptFields; private FetchSourceContext fetchSourceContext; @@ -55,9 +53,12 @@ public class SubSearchContext extends FilteredSearchContext { private boolean version; private boolean seqNoAndPrimaryTerm; + @SuppressWarnings("this-escape") public SubSearchContext(SearchContext context) { super(context); + context.addReleasable(this); this.fetchSearchResult = new FetchSearchResult(); + addReleasable(fetchSearchResult::decRef); this.querySearchResult = new QuerySearchResult(); } @@ -107,11 +108,6 @@ public boolean sourceRequested() { return fetchSourceContext != null && fetchSourceContext.fetchSource(); } - @Override - public boolean hasFetchSourceContext() { - return fetchSourceContext != null; - } - @Override public FetchSourceContext fetchSourceContext() { return fetchSourceContext; @@ -279,17 +275,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; } - @Override - public int[] docIdsToLoad() { - return docIdsToLoad; - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - this.docIdsToLoad = docIdsToLoad; - return this; - } - @Override public CollapseContext collapse() { return null; diff --git a/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java index bd6971dceb7be..988ea24d0fcc2 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java @@ -102,30 +102,27 @@ private FieldFactoryWrapper getFactoryForField(String fieldName) { // Load the field data on behalf of the script. Otherwise, it would require // additional permissions to deal with pagedbytes/ramusagestimator/etc. 
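The LeafDocLookup hunks that follow collapse anonymous PrivilegedAction classes into lambdas. A minimal sketch of the same pattern, assuming nothing from Elasticsearch (the system-property read is only a stand-in for the privileged field-data work):

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    final class PrivilegedLambdaSketch {
        static String readProperty(String key) {
            // The (PrivilegedAction<String>) cast pins the lambda's target type;
            // without it, doPrivileged is ambiguous between PrivilegedAction and
            // PrivilegedExceptionAction.
            return AccessController.doPrivileged((PrivilegedAction<String>) () -> System.getProperty(key));
        }
    }
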
- return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public FieldFactoryWrapper run() { - FieldFactoryWrapper fieldFactory = null; - IndexFieldData indexFieldData = fieldDataLookup.apply(fieldType, SCRIPT); + return AccessController.doPrivileged((PrivilegedAction) () -> { + IndexFieldData indexFieldData = fieldDataLookup.apply(fieldType, SCRIPT); - FieldFactoryWrapper docFactory = null; + FieldFactoryWrapper docFactory = null; - if (docFactoryCache.isEmpty() == false) { - docFactory = docFactoryCache.get(fieldName); - } + if (docFactoryCache.isEmpty() == false) { + docFactory = docFactoryCache.get(fieldName); + } - // if this field has already been accessed via the doc-access API and the field-access API - // uses doc values then we share to avoid double-loading - if (docFactory != null && indexFieldData instanceof SourceValueFetcherIndexFieldData == false) { - fieldFactory = docFactory; - } else { - fieldFactory = new FieldFactoryWrapper(indexFieldData.load(reader).getScriptFieldFactory(fieldName)); - } + // if this field has already been accessed via the doc-access API and the field-access API + // uses doc values then we share to avoid double-loading + FieldFactoryWrapper fieldFactory; + if (docFactory != null && indexFieldData instanceof SourceValueFetcherIndexFieldData == false) { + fieldFactory = docFactory; + } else { + fieldFactory = new FieldFactoryWrapper(indexFieldData.load(reader).getScriptFieldFactory(fieldName)); + } - fieldFactoryCache.put(fieldName, fieldFactory); + fieldFactoryCache.put(fieldName, fieldFactory); - return fieldFactory; - } + return fieldFactory; }); } @@ -150,35 +147,32 @@ private FieldFactoryWrapper getFactoryForDoc(String fieldName) { // Load the field data on behalf of the script. Otherwise, it would require // additional permissions to deal with pagedbytes/ramusagestimator/etc. 
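The refactored body above keeps the original caching logic: reuse a factory already built for the doc-access cache when both accessors are backed by the same doc values, otherwise load the field data. A hedged sketch of that shape with illustrative names, not the Elasticsearch API:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    final class FactoryCacheSketch<F> {
        private final Map<String, F> docCache = new HashMap<>();
        private final Map<String, F> fieldCache = new HashMap<>();

        F factoryForField(String field, boolean sharesDocValues, Function<String, F> load) {
            // reuse the doc-access factory instead of loading field data twice
            F shared = sharesDocValues ? docCache.get(field) : null;
            F factory = (shared != null) ? shared : load.apply(field);
            fieldCache.put(field, factory);
            return factory;
        }
    }
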
- return AccessController.doPrivileged(new PrivilegedAction() { - @Override - public FieldFactoryWrapper run() { - FieldFactoryWrapper docFactory = null; - FieldFactoryWrapper fieldFactory = null; - - if (fieldFactoryCache.isEmpty() == false) { - fieldFactory = fieldFactoryCache.get(fieldName); - } + return AccessController.doPrivileged((PrivilegedAction) () -> { + FieldFactoryWrapper docFactory = null; + FieldFactoryWrapper fieldFactory = null; - if (fieldFactory != null) { - IndexFieldData fieldIndexFieldData = fieldDataLookup.apply(fieldType, SCRIPT); + if (fieldFactoryCache.isEmpty() == false) { + fieldFactory = fieldFactoryCache.get(fieldName); + } - // if this field has already been accessed via the field-access API and the field-access API - // uses doc values then we share to avoid double-loading - if (fieldIndexFieldData instanceof SourceValueFetcherIndexFieldData == false) { - docFactory = fieldFactory; - } - } + if (fieldFactory != null) { + IndexFieldData fieldIndexFieldData = fieldDataLookup.apply(fieldType, SCRIPT); - if (docFactory == null) { - IndexFieldData indexFieldData = fieldDataLookup.apply(fieldType, SEARCH); - docFactory = new FieldFactoryWrapper(indexFieldData.load(reader).getScriptFieldFactory(fieldName)); + // if this field has already been accessed via the field-access API and the field-access API + // uses doc values then we share to avoid double-loading + if (fieldIndexFieldData instanceof SourceValueFetcherIndexFieldData == false) { + docFactory = fieldFactory; } + } - docFactoryCache.put(fieldName, docFactory); - - return docFactory; + if (docFactory == null) { + IndexFieldData indexFieldData = fieldDataLookup.apply(fieldType, SEARCH); + docFactory = new FieldFactoryWrapper(indexFieldData.load(reader).getScriptFieldFactory(fieldName)); } + + docFactoryCache.put(fieldName, docFactory); + + return docFactory; }); } diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java index 06f71fbf2514d..f88441b32d08b 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java @@ -37,8 +37,8 @@ public class SearchLookup implements SourceProvider { * The chain of fields for which this lookup was created, used for detecting * loops caused by runtime fields referring to other runtime fields. The chain is empty * for the "top level" lookup created for the entire search. When a lookup is used to load - * fielddata for a field, we fork it and make sure the field name name isn't in the chain, - * then add it to the end. So the lookup for the a field named {@code a} will be {@code ["a"]}. If + * fielddata for a field, we fork it and make sure the field name isn't in the chain, + * then add it to the end. So the lookup for a field named {@code a} will be {@code ["a"]}. If * that field looks up the values of a field named {@code b} then * {@code b}'s chain will contain {@code ["a", "b"]}. 
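A hedged sketch of the loop detection this javadoc describes, using illustrative types rather than SearchLookup's actual fields: each fork copies the chain, and seeing a field name twice means runtime fields refer to each other in a cycle.

    import java.util.LinkedHashSet;
    import java.util.Set;

    final class LookupChainSketch {
        private final Set<String> chain;

        LookupChainSketch() {
            chain = new LinkedHashSet<>(); // empty for the top-level lookup
        }

        private LookupChainSketch(Set<String> chain) {
            this.chain = chain;
        }

        LookupChainSketch forkFor(String field) {
            if (chain.contains(field)) {
                throw new IllegalArgumentException("loop in field definitions: " + chain + " -> " + field);
            }
            Set<String> next = new LinkedHashSet<>(chain);
            next.add(field);
            return new LookupChainSketch(next);
        }
    }
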
*/ diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 3044d15ab8552..01015ec8cc78e 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -94,13 +94,14 @@ static void executeRank(SearchContext searchContext) throws QueryPhaseExecutionE if (searchTimedOut) { break; } - RankSearchContext rankSearchContext = new RankSearchContext(searchContext, rankQuery, rankShardContext.windowSize()); - QueryPhase.addCollectorsAndSearch(rankSearchContext); - QuerySearchResult rrfQuerySearchResult = rankSearchContext.queryResult(); - rrfRankResults.add(rrfQuerySearchResult.topDocs().topDocs); - serviceTimeEWMA += rrfQuerySearchResult.serviceTimeEWMA(); - nodeQueueSize = Math.max(nodeQueueSize, rrfQuerySearchResult.nodeQueueSize()); - searchTimedOut = rrfQuerySearchResult.searchTimedOut(); + try (RankSearchContext rankSearchContext = new RankSearchContext(searchContext, rankQuery, rankShardContext.windowSize())) { + QueryPhase.addCollectorsAndSearch(rankSearchContext); + QuerySearchResult rrfQuerySearchResult = rankSearchContext.queryResult(); + rrfRankResults.add(rrfQuerySearchResult.topDocs().topDocs); + serviceTimeEWMA += rrfQuerySearchResult.serviceTimeEWMA(); + nodeQueueSize = Math.max(nodeQueueSize, rrfQuerySearchResult.nodeQueueSize()); + searchTimedOut = rrfQuerySearchResult.searchTimedOut(); + } } querySearchResult.setRankShardResult(rankShardContext.combine(rrfRankResults)); diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index edebf602af188..301d7fb219ca7 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; import org.elasticsearch.search.rank.RankShardResult; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.transport.LeakTracker; import java.io.IOException; import java.util.ArrayList; @@ -104,8 +105,8 @@ public QuerySearchResult(ShardSearchContextId contextId, SearchShardTarget shard setSearchShardTarget(shardTarget); isNull = false; setShardSearchRequest(shardSearchRequest); - this.refCounted = AbstractRefCounted.of(this::close); this.toRelease = new ArrayList<>(); + this.refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> Releasables.close(toRelease))); } private QuerySearchResult(boolean isNull) { @@ -245,10 +246,6 @@ public void releaseAggs() { } } - private void close() { - Releasables.close(toRelease); - } - public void addReleasable(Releasable releasable) { toRelease.add(releasable); } diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java index 4b075523c5286..86f7566683d21 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java @@ -59,11 +59,13 @@ public class RankSearchContext extends SearchContext { private final int windowSize; private final QuerySearchResult querySearchResult; + @SuppressWarnings("this-escape") public RankSearchContext(SearchContext parent, Query rankQuery, int windowSize) { this.parent = parent; 
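The QueryPhase.executeRank hunk above is why RankSearchContext (below) registers its QuerySearchResult as a releasable: wrapping each per-query context in try-with-resources releases it even when a query fails mid-loop. A hedged sketch of that lifecycle with illustrative types:

    import java.util.ArrayList;
    import java.util.List;

    final class PerQueryLifecycleSketch {
        interface ScopedContext extends AutoCloseable {
            int[] topDocs();

            @Override
            void close(); // narrowed: no checked exception escapes the loop
        }

        static List<int[]> runAll(List<ScopedContext> contexts) {
            List<int[]> results = new ArrayList<>();
            for (ScopedContext context : contexts) {
                try (context) {
                    results.add(context.topDocs()); // released even if this throws
                }
            }
            return results;
        }
    }
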
this.rankQuery = parent.buildFilteredQuery(rankQuery); this.windowSize = windowSize; this.querySearchResult = new QuerySearchResult(parent.readerContext().id(), parent.shardTarget(), parent.request()); + this.addReleasable(querySearchResult::decRef); } @Override @@ -320,11 +322,6 @@ public boolean sourceRequested() { throw new UnsupportedOperationException(); } - @Override - public boolean hasFetchSourceContext() { - throw new UnsupportedOperationException(); - } - @Override public FetchSourceContext fetchSourceContext() { throw new UnsupportedOperationException(); @@ -485,16 +482,6 @@ public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { throw new UnsupportedOperationException(); } - @Override - public int[] docIdsToLoad() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad) { - throw new UnsupportedOperationException(); - } - @Override public DfsSearchResult dfsResult() { throw new UnsupportedOperationException(); diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index 389e3a56cf152..c873717fe55e7 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -197,9 +197,6 @@ public void setScoreMode(QueryRescoreMode scoreMode) { this.scoreMode = scoreMode; } - public void setScoreMode(String scoreMode) { - setScoreMode(QueryRescoreMode.fromString(scoreMode)); - } } } diff --git a/server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java b/server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java index a7977c18d338c..de081fd386d54 100644 --- a/server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java +++ b/server/src/main/java/org/elasticsearch/search/runtime/GeoPointScriptFieldDistanceFeatureQuery.java @@ -114,7 +114,6 @@ private class DistanceScorer extends Scorer { private final TwoPhaseIterator twoPhase; private final DocIdSetIterator disi; private final float weight; - private double maxDistance = GeoUtils.EARTH_MEAN_RADIUS_METERS * Math.PI; protected DistanceScorer(Weight weight, AbstractLongFieldScript script, int maxDoc, float boost) { super(weight); diff --git a/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java b/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java index 68ee36f5c0883..249f2c95ddc7f 100644 --- a/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java @@ -152,24 +152,18 @@ static Object convertValueFromSortField(Object value, SortField sortField, DocVa private static Object convertValueFromSortType(String fieldName, SortField.Type sortType, Object value, DocValueFormat format) { try { switch (sortType) { - case DOC: + case DOC, INT: if (value instanceof Number) { return ((Number) value).intValue(); } return Integer.parseInt(value.toString()); - case SCORE: + case SCORE, FLOAT: if (value instanceof Number) { return ((Number) value).floatValue(); } return Float.parseFloat(value.toString()); - case INT: - if (value instanceof Number) { - return ((Number) value).intValue(); - } - return Integer.parseInt(value.toString()); - case DOUBLE: if (value instanceof Number) { return ((Number) 
value).doubleValue(); @@ -187,12 +181,6 @@ private static Object convertValueFromSortType(String fieldName, SortField.Type () -> { throw new IllegalStateException("now() is not allowed in [search_after] key"); } ); - case FLOAT: - if (value instanceof Number) { - return ((Number) value).floatValue(); - } - return Float.parseFloat(value.toString()); - case STRING_VAL: case STRING: if (value instanceof BytesRef bytesRef) { diff --git a/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java b/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java index ae2f7fc4ecbbb..b1b30856324b4 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java +++ b/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.List; -import java.util.Locale; import static java.util.Collections.emptyList; @@ -110,7 +109,7 @@ interface Loader { public void swap(long lhs, long rhs) {} @Override - public Loader loader(LeafReaderContext ctx) throws IOException { + public Loader loader(LeafReaderContext ctx) { return (index, doc) -> {}; } }; @@ -254,24 +253,6 @@ public boolean inHeapMode(long bucket) { */ protected abstract void swap(long lhs, long rhs); - /** - * Return a fairly human readable representation of the array backing the sort. - *
<p> - * This is intentionally not a {@link #toString()} implementation because it'll - * be quite slow. - * </p>
    - */ - protected final String debugFormat() { - StringBuilder b = new StringBuilder(); - for (long index = 0; index < values().size(); index++) { - if (index % bucketSize == 0) { - b.append('\n').append(String.format(Locale.ROOT, "%20d", index / bucketSize)).append(": "); - } - b.append(String.format(Locale.ROOT, "%20s", getValue(index))).append(' '); - } - return b.toString(); - } - /** * Initialize the gather offsets after setting up values. Subclasses * should call this once, after setting up their {@link #values()}. @@ -415,7 +396,6 @@ public final void collect(int doc, long bucket) throws IOException { } else { setNextGatherOffset(rootIndex, next - 1); } - return; } /** diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 2dceca2e9ad65..8b07a9e48a660 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -651,14 +651,7 @@ private NumericDoubleValues getNumericDoubleValues(LeafReaderContext context) th final BitSet rootDocs = nested.rootDocs(context); final DocIdSetIterator innerDocs = nested.innerDocs(context); final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; - return localSortMode.select( - distanceValues, - Double.POSITIVE_INFINITY, - rootDocs, - innerDocs, - context.reader().maxDoc(), - maxChildren - ); + return localSortMode.select(distanceValues, Double.POSITIVE_INFINITY, rootDocs, innerDocs, maxChildren); } } diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 5d11563b5d8ed..0c9b56b1855d7 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -100,8 +100,12 @@ public SortFieldAndFormat build(SearchExecutionContext context) { } @Override - public BucketedSort buildBucketedSort(SearchExecutionContext context, BigArrays bigArrays, int bucketSize, BucketedSort.ExtraData extra) - throws IOException { + public BucketedSort buildBucketedSort( + SearchExecutionContext context, + BigArrays bigArrays, + int bucketSize, + BucketedSort.ExtraData extra + ) { return new BucketedSort.ForFloats(bigArrays, order, DocValueFormat.RAW, bucketSize, extra) { @Override public boolean needsScores() { @@ -109,7 +113,7 @@ public boolean needsScores() { } @Override - public Leaf forLeaf(LeafReaderContext ctx) throws IOException { + public Leaf forLeaf(LeafReaderContext ctx) { return new BucketedSort.ForFloats.Leaf(ctx) { private Scorable scorer; private float score; @@ -165,7 +169,7 @@ public TransportVersion getMinimalSupportedVersion() { } @Override - public ScoreSortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + public ScoreSortBuilder rewrite(QueryRewriteContext ctx) { return this; } diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 4ac7348a6c4a4..a0745d0f9c64a 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -295,7 +295,7 @@ protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOEx 
final BytesRefBuilder spare = new BytesRefBuilder(); @Override - public boolean advanceExact(int doc) throws IOException { + public boolean advanceExact(int doc) { leafScript.setDocument(doc); return true; } @@ -343,7 +343,7 @@ protected SortedNumericDoubleValues getValues(LeafReaderContext context) throws leafScript = numberSortScript.newInstance(new DocValuesDocReader(searchLookup, context)); final NumericDoubleValues values = new NumericDoubleValues() { @Override - public boolean advanceExact(int doc) throws IOException { + public boolean advanceExact(int doc) { leafScript.setDocument(doc); return true; } @@ -374,7 +374,7 @@ protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOEx final BinaryDocValues values = new AbstractBinaryDocValues() { @Override - public boolean advanceExact(int doc) throws IOException { + public boolean advanceExact(int doc) { leafScript.setDocument(doc); return true; } diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortBuilders.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilders.java index 34363a614a7e4..c0bcbdc98e35f 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilders.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.sort; -import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.script.Script; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; @@ -65,16 +64,6 @@ public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, double la return new GeoDistanceSortBuilder(fieldName, lat, lon); } - /** - * Constructs a new distance based sort on a geo point like field. - * - * @param fieldName The geo point like field name. - * @param points The points to create the range distance facets from. - */ - public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, GeoPoint... points) { - return new GeoDistanceSortBuilder(fieldName, points); - } - /** * Constructs a new distance based sort on a geo point like field. 
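The ScriptSortBuilder hunks above drop IOException from advanceExact because a script-backed view only repositions the script and can always report a value. A hedged sketch of that shape with illustrative interfaces, not the Lucene/Elasticsearch classes:

    final class ScriptValuesSketch {
        interface LeafScript {
            void setDocument(int docId);

            double execute();
        }

        abstract static class DoubleValues {
            abstract boolean advanceExact(int doc);

            abstract double doubleValue();
        }

        static DoubleValues wrap(LeafScript leafScript) {
            return new DoubleValues() {
                @Override
                boolean advanceExact(int doc) {
                    leafScript.setDocument(doc); // reposition only; cannot fail
                    return true;
                }

                @Override
                double doubleValue() {
                    return leafScript.execute();
                }
            };
        }
    }
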
* diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortValue.java b/server/src/main/java/org/elasticsearch/search/sort/SortValue.java index 067439931a85b..ab7dcd6615f79 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortValue.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortValue.java @@ -369,7 +369,6 @@ private static class EmptySortValue extends SortValue { public static final String NAME = "empty"; private static final String EMPTY_STRING = ""; - private int sortValue = 0; private EmptySortValue() {} @@ -381,7 +380,7 @@ public String getWriteableName() { } @Override - public void writeTo(StreamOutput out) throws IOException {} + public void writeTo(StreamOutput out) {} @Override public Object getKey() { @@ -394,7 +393,7 @@ public String format(DocValueFormat format) { } @Override - protected XContentBuilder rawToXContent(XContentBuilder builder) throws IOException { + protected XContentBuilder rawToXContent(XContentBuilder builder) { return builder; } @@ -420,7 +419,7 @@ public String toString() { @Override public int typeComparisonKey() { - return sortValue; + return 0; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java b/server/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java index 80beb5d2ec7ca..0956a9f94677c 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java @@ -20,17 +20,17 @@ public class DirectSpellcheckerSettings { // NB: If this changes, make sure to change the default in TermBuilderSuggester - public static SuggestMode DEFAULT_SUGGEST_MODE = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; - public static float DEFAULT_ACCURACY = 0.5f; - public static SortBy DEFAULT_SORT = SortBy.SCORE; + public static final SuggestMode DEFAULT_SUGGEST_MODE = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; + public static final float DEFAULT_ACCURACY = 0.5f; + public static final SortBy DEFAULT_SORT = SortBy.SCORE; // NB: If this changes, make sure to change the default in TermBuilderSuggester - public static StringDistance DEFAULT_STRING_DISTANCE = DirectSpellChecker.INTERNAL_LEVENSHTEIN; - public static int DEFAULT_MAX_EDITS = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; - public static int DEFAULT_MAX_INSPECTIONS = 5; - public static float DEFAULT_MAX_TERM_FREQ = 0.01f; - public static int DEFAULT_PREFIX_LENGTH = 1; - public static int DEFAULT_MIN_WORD_LENGTH = 4; - public static float DEFAULT_MIN_DOC_FREQ = 0f; + public static final StringDistance DEFAULT_STRING_DISTANCE = DirectSpellChecker.INTERNAL_LEVENSHTEIN; + public static final int DEFAULT_MAX_EDITS = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; + public static final int DEFAULT_MAX_INSPECTIONS = 5; + public static final float DEFAULT_MAX_TERM_FREQ = 0.01f; + public static final int DEFAULT_PREFIX_LENGTH = 1; + public static final int DEFAULT_MIN_WORD_LENGTH = 4; + public static final float DEFAULT_MIN_DOC_FREQ = 0f; private SuggestMode suggestMode = DEFAULT_SUGGEST_MODE; private float accuracy = DEFAULT_ACCURACY; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java index f126091c785d8..f3371caf4c1a7 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -206,7 +206,6 @@ public 
int hashCode() { @SuppressWarnings("rawtypes") public abstract static class Suggestion implements Iterable, NamedWriteable, ToXContentFragment { - public static final int TYPE = 0; protected final String name; protected final int size; protected final List entries = new ArrayList<>(5); @@ -635,10 +634,6 @@ public boolean collateMatch() { return (collateMatch != null) ? collateMatch : true; } - protected void setScore(float score) { - this.score = score; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeText(text); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java b/server/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java index 674f936890283..37cc7bb59c253 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java @@ -35,8 +35,8 @@ public abstract static class SuggestionContext { private Analyzer analyzer; private int size = 5; private int shardSize = -1; - private SearchExecutionContext searchExecutionContext; - private Suggester suggester; + private final SearchExecutionContext searchExecutionContext; + private final Suggester suggester; protected SuggestionContext(Suggester suggester, SearchExecutionContext searchExecutionContext) { this.suggester = suggester; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java index c71673962ca2d..e088948b18e03 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java @@ -56,7 +56,7 @@ */ public final class CompletionSuggestion extends Suggest.Suggestion { - private boolean skipDuplicates; + private final boolean skipDuplicates; /** * Creates a completion suggestion given its name, size and whether it should skip duplicates diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java index 86e18b3e5a406..7a3bc3c67ba6d 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java @@ -211,13 +211,6 @@ private CompletionSuggestionBuilder contexts(XContentBuilder contextBuilder) { return this; } - /** - * Returns whether duplicate suggestions should be filtered out. - */ - public boolean skipDuplicates() { - return skipDuplicates; - } - /** * Should duplicates be filtered or not. Defaults to {@code false}. 
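A hedged usage sketch for the setter documented above; the builder and its fluent methods are the ones in this module, while the field name and prefix are illustrative:

    import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;

    CompletionSuggestionBuilder suggest = new CompletionSuggestionBuilder("title.completion")
        .prefix("elast")
        .skipDuplicates(true) // filter out suggestions whose text already appeared
        .size(5);
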
*/ diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/FuzzyOptions.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/FuzzyOptions.java index f241b6f89633e..7d7d5516c50ae 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/FuzzyOptions.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/FuzzyOptions.java @@ -62,12 +62,12 @@ public static Builder builder() { return new Builder(); } - private int editDistance; - private boolean transpositions; - private int fuzzyMinLength; - private int fuzzyPrefixLength; - private boolean unicodeAware; - private int maxDeterminizedStates; + private final int editDistance; + private final boolean transpositions; + private final int fuzzyMinLength; + private final int fuzzyPrefixLength; + private final boolean unicodeAware; + private final int maxDeterminizedStates; private FuzzyOptions( int editDistance, diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java index 0759b413dd664..fdfa1303b2d77 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/RegexOptions.java @@ -61,8 +61,8 @@ static RegexOptions parse(XContentParser parser) throws IOException { return PARSER.parse(parser, null).build(); } - private int flagsValue; - private int maxDeterminizedStates; + private final int flagsValue; + private final int maxDeterminizedStates; private RegexOptions(int flagsValue, int maxDeterminizedStates) { this.flagsValue = flagsValue; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java index bac3b7491a661..31959df6b023e 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java @@ -25,7 +25,7 @@ * the best one per document (sorted by weight) is kept. **/ class TopSuggestGroupDocsCollector extends TopSuggestDocsCollector { - private Map> docContexts = new HashMap<>(); + private final Map> docContexts = new HashMap<>(); /** * Sole constructor diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java index ce0c58463bad2..65c464cac256d 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java @@ -72,7 +72,7 @@ public boolean equals(Object o) { if (isPrefix != that.isPrefix) return false; if (boost != that.boost) return false; - return category != null ? 
category.equals(that.category) : that.category == null; + return Objects.equals(category, that.category); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextBuilder.java index b180e6fd13335..2a83bf289bdef 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextBuilder.java @@ -13,7 +13,7 @@ */ public abstract class ContextBuilder> { - protected String name; + protected final String name; /** * @param name of the context mapper to build diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java index c48a1ccb12e6f..d2edd460b926d 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java @@ -165,41 +165,5 @@ public String toString() { } } - public static class InternalQueryContext { - public final String context; - public final int boost; - public final boolean isPrefix; - - public InternalQueryContext(String context, int boost, boolean isPrefix) { - this.context = context; - this.boost = boost; - this.isPrefix = isPrefix; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - InternalQueryContext that = (InternalQueryContext) o; - - if (boost != that.boost) return false; - if (isPrefix != that.isPrefix) return false; - return context != null ? context.equals(that.context) : that.context == null; - - } - - @Override - public int hashCode() { - int result = context != null ? context.hashCode() : 0; - result = 31 * result + boost; - result = 31 * result + (isPrefix ? 
1 : 0); - return result; - } - - @Override - public String toString() { - return "QueryContext{" + "context='" + context + '\'' + ", boost=" + boost + ", isPrefix=" + isPrefix + '}'; - } - } + public record InternalQueryContext(String context, int boost, boolean isPrefix) {} } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java index 9a975fe930979..f7709d7aac911 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java @@ -142,7 +142,7 @@ protected Iterable contexts() { if (typedContexts.isEmpty()) { throw new IllegalArgumentException("Contexts are mandatory in context enabled completion field [" + name + "]"); } - return new ArrayList(typedContexts); + return new ArrayList<>(typedContexts); } } @@ -166,8 +166,8 @@ public ContextQuery toContextQuery(CompletionQuery query, Map internalQueryContext = queryContexts.get(mapping.name()); if (internalQueryContext != null) { for (ContextMapping.InternalQueryContext context : internalQueryContext) { - scratch.append(context.context); - typedContextQuery.addContext(scratch.toCharsRef(), context.boost, context.isPrefix == false); + scratch.append(context.context()); + typedContextQuery.addContext(scratch.toCharsRef(), context.boost(), context.isPrefix() == false); scratch.setLength(1); hasContext = true; } @@ -193,12 +193,8 @@ public Map> getNamedContexts(List contexts) { int typeId = typedContext.charAt(0); assert typeId < contextMappings.size() : "Returned context has invalid type"; ContextMapping mapping = contextMappings.get(typeId); - Set contextEntries = contextMap.get(mapping.name()); - if (contextEntries == null) { - contextEntries = new HashSet<>(); - contextMap.put(mapping.name(), contextEntries); - } - contextEntries.add(typedContext.subSequence(1, typedContext.length()).toString()); + contextMap.computeIfAbsent(mapping.name(), k -> new HashSet<>()) + .add(typedContext.subSequence(1, typedContext.length()).toString()); } return contextMap; } @@ -273,7 +269,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (obj == null || (obj instanceof ContextMappings) == false) { + if ((obj instanceof ContextMappings) == false) { return false; } ContextMappings other = ((ContextMappings) obj); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index 933d2198a2dae..2cd7a751264bd 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -345,19 +345,6 @@ public Builder precision(String precision) { return precision(DistanceUnit.parse(precision, DistanceUnit.METERS, DistanceUnit.METERS)); } - /** - * Set the precision use o make suggestions - * - * @param precision - * precision value - * @param unit - * {@link DistanceUnit} to use - * @return this - */ - public Builder precision(double precision, DistanceUnit unit) { - return precision(unit.toMeters(precision)); - } - /** * Set the precision use o make suggestions * diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java 
b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java index 61dfb0f075d34..fc29d1ed7a567 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateGenerator.java @@ -17,15 +17,8 @@ //TODO public for tests public abstract class CandidateGenerator { - public abstract boolean isKnownWord(BytesRef term) throws IOException; - public abstract TermStats termStats(BytesRef term) throws IOException; - public CandidateSet drawCandidates(BytesRef term) throws IOException { - CandidateSet set = new CandidateSet(Candidate.EMPTY, createCandidate(term, true)); - return drawCandidates(set); - } - public Candidate createCandidate(BytesRef term, boolean userInput) throws IOException { return createCandidate(term, termStats(term), 1.0, userInput); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java index e379674d02eab..fdc05d12a2389 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java @@ -28,13 +28,13 @@ public Correction[] findBestCandiates(CandidateSet[] sets, float errorFraction, if (sets.length == 0) { return Correction.EMPTY; } - PriorityQueue corrections = new PriorityQueue(maxNumCorrections) { + PriorityQueue corrections = new PriorityQueue<>(maxNumCorrections) { @Override protected boolean lessThan(Correction a, Correction b) { return a.compareTo(b) < 0; } }; - int numMissspellings = 1; + final int numMissspellings; if (errorFraction >= 1.0) { numMissspellings = (int) errorFraction; } else { @@ -62,11 +62,11 @@ public void findCandidates( CandidateSet current = candidates[ord]; if (ord == candidates.length - 1) { path[ord] = current.originalTerm; - updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize)); + updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, ord, gramSize)); if (numMissspellingsLeft > 0) { for (int i = 0; i < current.candidates.length; i++) { path[ord] = current.candidates[i]; - updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize)); + updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, ord, gramSize)); } } } else { @@ -79,7 +79,7 @@ public void findCandidates( numMissspellingsLeft, corrections, cutoffScore, - pathScore + scorer.score(path, candidates, ord, gramSize) + pathScore + scorer.score(path, ord, gramSize) ); for (int i = 0; i < current.candidates.length; i++) { path[ord] = current.candidates[i]; @@ -90,20 +90,12 @@ public void findCandidates( numMissspellingsLeft - 1, corrections, cutoffScore, - pathScore + scorer.score(path, candidates, ord, gramSize) + pathScore + scorer.score(path, ord, gramSize) ); } } else { path[ord] = current.originalTerm; - findCandidates( - candidates, - path, - ord + 1, - 0, - corrections, - cutoffScore, - pathScore + scorer.score(path, candidates, ord, gramSize) - ); + findCandidates(candidates, path, ord + 1, 0, corrections, cutoffScore, pathScore + scorer.score(path, ord, gramSize)); } } @@ -135,7 +127,7 @@ private void updateTop( public double score(Candidate[] path, CandidateSet[] candidates) throws IOException { double score = 0.0d; for (int i = 0; i < 
candidates.length; i++) { - score += scorer.score(path, candidates, i, gramSize); + score += scorer.score(path, i, gramSize); } return Math.exp(score); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java index 98143e0acf413..b95971d13c11d 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -97,14 +97,6 @@ public DirectCandidateGenerator( termsEnum = terms.iterator(); } - /* (non-Javadoc) - * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#isKnownWord(org.apache.lucene.util.BytesRef) - */ - @Override - public boolean isKnownWord(BytesRef term) throws IOException { - return termStats(term).docFreq > 0; - } - /* (non-Javadoc) * @see org.elasticsearch.search.suggest.phrase.CandidateGenerator#frequency(org.apache.lucene.util.BytesRef) */ @@ -128,10 +120,6 @@ public TermStats internalTermStats(BytesRef term) throws IOException { return new TermStats(0, 0); } - public String getField() { - return field; - } - @Override public CandidateSet drawCandidates(CandidateSet set) throws IOException { Candidate original = set.originalTerm; @@ -181,15 +169,14 @@ protected BytesRef preFilter(final BytesRef term, final CharsRefBuilder spare, f if (preFilter == null) { return term; } - final BytesRefBuilder result = byteSpare; analyze(preFilter, term, field, new TokenConsumer() { @Override - public void nextToken() throws IOException { - this.fillBytesRef(result); + public void nextToken() { + this.fillBytesRef(byteSpare); } }, spare); - return result.get(); + return byteSpare.get(); } protected void postFilter( @@ -344,11 +331,10 @@ public boolean equals(Object obj) { if (getClass() != obj.getClass()) return false; Candidate other = (Candidate) obj; if (term == null) { - if (other.term != null) return false; + return other.term == null; } else { - if (term.equals(other.term) == false) return false; + return term.equals(other.term); } - return true; } /** Lower scores sort first; if scores are equal, then later (zzz) terms sort first */ @@ -364,7 +350,7 @@ public int compareTo(Candidate other) { } @Override - public Candidate createCandidate(BytesRef term, TermStats termStats, double channelScore, boolean userInput) throws IOException { + public Candidate createCandidate(BytesRef term, TermStats termStats, double channelScore, boolean userInput) { return new Candidate(term, termStats, channelScore, score(termStats, channelScore, sumTotalTermFreq), userInput); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java index b3cb3444d2206..a153d4de54dcb 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java @@ -437,32 +437,24 @@ public PhraseSuggestionContext.DirectCandidateGenerator build(IndexAnalyzers ind private static SuggestMode resolveSuggestMode(String suggestMode) { suggestMode = suggestMode.toLowerCase(Locale.US); - if ("missing".equals(suggestMode)) { - return SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; - } else if ("popular".equals(suggestMode)) { - return 
SuggestMode.SUGGEST_MORE_POPULAR; - } else if ("always".equals(suggestMode)) { - return SuggestMode.SUGGEST_ALWAYS; - } else { - throw new IllegalArgumentException("Illegal suggest mode " + suggestMode); - } + return switch (suggestMode) { + case "missing" -> SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; + case "popular" -> SuggestMode.SUGGEST_MORE_POPULAR; + case "always" -> SuggestMode.SUGGEST_ALWAYS; + default -> throw new IllegalArgumentException("Illegal suggest mode " + suggestMode); + }; } static StringDistance resolveDistance(String distanceVal) { distanceVal = distanceVal.toLowerCase(Locale.ROOT); - if ("internal".equals(distanceVal)) { - return DirectSpellChecker.INTERNAL_LEVENSHTEIN; - } else if ("damerau_levenshtein".equals(distanceVal)) { - return new LuceneLevenshteinDistance(); - } else if ("levenshtein".equals(distanceVal)) { - return new LevenshteinDistance(); - } else if ("jaro_winkler".equals(distanceVal)) { - return new JaroWinklerDistance(); - } else if ("ngram".equals(distanceVal)) { - return new NGramDistance(); - } else { - throw new IllegalArgumentException("Illegal distance option " + distanceVal); - } + return switch (distanceVal) { + case "internal" -> DirectSpellChecker.INTERNAL_LEVENSHTEIN; + case "damerau_levenshtein" -> new LuceneLevenshteinDistance(); + case "levenshtein" -> new LevenshteinDistance(); + case "jaro_winkler" -> new JaroWinklerDistance(); + case "ngram" -> new NGramDistance(); + default -> throw new IllegalArgumentException("Illegal distance option " + distanceVal); + }; } private static void transferIfNotNull(T value, Consumer consumer) { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java index fe85dd70b7337..a14bddd03cdec 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java @@ -42,7 +42,7 @@ public final class Laplace extends SmoothingModel { */ public static final double DEFAULT_LAPLACE_ALPHA = 0.5; - private double alpha = DEFAULT_LAPLACE_ALPHA; + private final double alpha; /** * Creates a Laplace smoothing model. 
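For reference, textbook additive (Laplace) smoothing with the DEFAULT_LAPLACE_ALPHA of 0.5 above has the following shape; treat this as a sketch of the idea, not the exact statistics LaplaceScorer plugs in:

    static double smoothedProbability(long observedCount, long totalCount, long vocabularySize, double alpha) {
        // alpha > 0 keeps unseen terms at a small non-zero probability
        return (observedCount + alpha) / (totalCount + alpha * vocabularySize);
    }
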
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java index ff752a8e62985..dce063d6e655b 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java @@ -15,7 +15,7 @@ import java.io.IOException; final class LaplaceScorer extends WordScorer { - private double alpha; + private final double alpha; LaplaceScorer(IndexReader reader, Terms terms, String field, double realWordLikelihood, BytesRef separator, double alpha) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java index 80ebd9e45acf8..7e804c173da9c 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/MultiCandidateGeneratorWrapper.java @@ -19,18 +19,13 @@ public final class MultiCandidateGeneratorWrapper extends CandidateGenerator { private final CandidateGenerator[] candidateGenerator; - private int numCandidates; + private final int numCandidates; public MultiCandidateGeneratorWrapper(int numCandidates, CandidateGenerator... candidateGenerators) { this.candidateGenerator = candidateGenerators; this.numCandidates = numCandidates; } - @Override - public boolean isKnownWord(BytesRef term) throws IOException { - return candidateGenerator[0].isKnownWord(term); - } - @Override public TermStats termStats(BytesRef term) throws IOException { return candidateGenerator[0].termStats(term); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index 52c15eb214da9..4400852ebbd5a 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -159,7 +159,7 @@ public Suggestion> innerExecute( return response; } - private static TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRefBuilder spare, String field) throws IOException { + private static TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRefBuilder spare, String field) { spare.copyUTF8Bytes(query); return analyzer.tokenStream(field, new CharArrayReader(spare.chars(), 0, spare.length())); } @@ -174,7 +174,7 @@ protected Suggestion> emptySuggestion( String name, PhraseSuggestionContext suggestion, CharsRefBuilder spare - ) throws IOException { + ) { PhraseSuggestion phraseSuggestion = new PhraseSuggestion(name, suggestion.getSize()); spare.copyUTF8Bytes(suggestion.getText()); phraseSuggestion.addTerm(new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length())); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java index 2cb04b73b7f5f..1c881a9887583 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestion.java @@ -73,13 +73,6 @@ public Entry(StreamInput in) throws IOException { cutoffScore = in.readDouble(); } - /** - * @return cutoff 
score for suggestions. input term score * confidence for phrase suggest, 0 otherwise - */ - public double getCutoffScore() { - return cutoffScore; - } - @Override protected void merge(Suggestion.Entry