diff --git a/.buildkite/pipelines/dra-workflow.yml b/.buildkite/pipelines/dra-workflow.yml index e7bf19816356f..32a2b7d22134a 100644 --- a/.buildkite/pipelines/dra-workflow.yml +++ b/.buildkite/pipelines/dra-workflow.yml @@ -7,6 +7,7 @@ steps: image: family/elasticsearch-ubuntu-2204 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - wait # The hadoop build depends on the ES artifact # So let's trigger the hadoop build any time we build a new staging artifact diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml index f530f237113a9..1a513971b2c10 100644 --- a/.buildkite/pipelines/intake.template.yml +++ b/.buildkite/pipelines/intake.template.yml @@ -7,6 +7,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - wait - label: part1 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1 @@ -16,6 +17,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part2 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2 timeout_in_minutes: 300 @@ -24,6 +26,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part3 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3 timeout_in_minutes: 300 @@ -32,6 +35,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part4 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 timeout_in_minutes: 300 @@ -40,6 +44,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part5 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 timeout_in_minutes: 300 @@ -48,6 +53,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" @@ -61,6 +67,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: rest-compat @@ -71,6 +78,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - wait - trigger: elasticsearch-dra-workflow label: Trigger DRA snapshot workflow diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index b33fc98ccb01b..4124d4e550d11 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -8,6 +8,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - wait - label: part1 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed 
-Dscan.capture-task-input-files checkPart1 @@ -17,6 +18,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part2 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2 timeout_in_minutes: 300 @@ -25,6 +27,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part3 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3 timeout_in_minutes: 300 @@ -33,6 +36,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part4 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 timeout_in_minutes: 300 @@ -41,6 +45,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part5 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 timeout_in_minutes: 300 @@ -49,6 +54,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" @@ -62,6 +68,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: rest-compat @@ -72,6 +79,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - wait - trigger: elasticsearch-dra-workflow label: Trigger DRA snapshot workflow diff --git a/.buildkite/pipelines/lucene-snapshot/build-snapshot.yml b/.buildkite/pipelines/lucene-snapshot/build-snapshot.yml index 8cf2a8aacbece..1f69b8faa7ab4 100644 --- a/.buildkite/pipelines/lucene-snapshot/build-snapshot.yml +++ b/.buildkite/pipelines/lucene-snapshot/build-snapshot.yml @@ -15,6 +15,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - wait - trigger: "elasticsearch-lucene-snapshot-tests" build: diff --git a/.buildkite/pipelines/lucene-snapshot/run-tests.yml b/.buildkite/pipelines/lucene-snapshot/run-tests.yml index c76c54a56494e..49c3396488d82 100644 --- a/.buildkite/pipelines/lucene-snapshot/run-tests.yml +++ b/.buildkite/pipelines/lucene-snapshot/run-tests.yml @@ -7,6 +7,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - wait: null - label: part1 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1 @@ -16,6 +17,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part2 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2 timeout_in_minutes: 300 @@ -24,6 +26,7 @@ steps: image: 
family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part3 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3 timeout_in_minutes: 300 @@ -32,6 +35,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part4 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 timeout_in_minutes: 300 @@ -40,6 +44,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: part5 command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 timeout_in_minutes: 300 @@ -48,6 +53,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" @@ -64,6 +70,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: rest-compat @@ -74,3 +81,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/periodic-packaging.bwc.template.yml b/.buildkite/pipelines/periodic-packaging.bwc.template.yml index b06bc80d3535d..8a6fa2553b204 100644 --- a/.buildkite/pipelines/periodic-packaging.bwc.template.yml +++ b/.buildkite/pipelines/periodic-packaging.bwc.template.yml @@ -11,5 +11,6 @@ image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: $BWC_VERSION diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 406331dda881b..4217fc91bf0fd 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -46,6 +46,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.0.1 @@ -62,6 +63,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.1.1 @@ -78,6 +80,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.2.1 @@ -94,6 +97,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.3.2 @@ -110,6 +114,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.4.2 @@ -126,6 +131,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.5.2 @@ -142,6 +148,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.6.2 @@ -158,6 +165,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: 
custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.7.1 @@ -174,6 +182,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.8.1 @@ -190,6 +199,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.9.3 @@ -206,6 +216,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.10.2 @@ -222,6 +233,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.11.2 @@ -238,6 +250,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.12.1 @@ -254,6 +267,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.13.4 @@ -270,6 +284,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.14.2 @@ -286,6 +301,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.15.2 @@ -302,6 +318,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.16.3 @@ -318,6 +335,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 7.17.23 @@ -334,6 +352,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.0.1 @@ -350,6 +369,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.1.3 @@ -366,6 +386,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.2.3 @@ -382,6 +403,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.3.3 @@ -398,6 +420,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.4.3 @@ -414,6 +437,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.5.3 @@ -430,6 +454,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.6.2 @@ -446,6 +471,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.7.1 @@ -462,6 +488,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.8.2 @@ -478,6 +505,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.9.2 @@ -494,6 +522,7 @@ steps: image: 
family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.10.4 @@ -510,6 +539,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.11.4 @@ -526,6 +556,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.12.2 @@ -542,6 +573,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.13.4 @@ -558,6 +590,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.14.2 @@ -574,6 +607,7 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: 8.15.0 diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index d8c5d55fc7e4f..867ebe41ed6af 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -30,6 +30,7 @@ steps: localSsds: 1 localSsdInterface: nvme machineType: custom-32-98304 + diskSizeGb: 250 env: {} - group: platform-support-windows steps: diff --git a/.buildkite/pipelines/periodic.bwc.template.yml b/.buildkite/pipelines/periodic.bwc.template.yml index 43a0a7438d656..b22270dbf221c 100644 --- a/.buildkite/pipelines/periodic.bwc.template.yml +++ b/.buildkite/pipelines/periodic.bwc.template.yml @@ -7,6 +7,7 @@ machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: $BWC_VERSION retry: diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index 207a332ed6717..87e30a0ea73ba 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -25,6 +25,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: example-plugins command: |- cd $$WORKSPACE/plugins/examples @@ -36,6 +37,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - group: java-fips-matrix steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-fips-matrix" @@ -57,6 +59,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" @@ -73,6 +76,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" BWC_VERSION: "{{matrix.BWC_VERSION}}" @@ -101,6 +105,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" @@ -121,6 +126,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" BWC_VERSION: "{{matrix.BWC_VERSION}}" @@ -156,6 +162,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 
buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: third-party / azure command: | export azure_storage_container=elasticsearch-ci-thirdparty @@ -170,6 +177,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: third-party / gcs command: | export google_storage_bucket=elasticsearch-ci-thirdparty @@ -184,6 +192,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: third-party / geoip command: | .ci/scripts/run-gradle.sh :modules:ingest-geoip:internalClusterTest -Dtests.jvm.argline="-Dgeoip_use_service=true" @@ -193,6 +202,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: third-party / s3 command: | export amazon_s3_bucket=elasticsearch-ci.us-west-2 @@ -207,6 +217,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: Upload Snyk Dependency Graph command: .ci/scripts/run-gradle.sh uploadSnykDependencyGraph -PsnykTargetReference=$BUILDKITE_BRANCH env: @@ -217,6 +228,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 if: build.branch == "main" || build.branch == "7.17" - label: check-branch-consistency command: .ci/scripts/run-gradle.sh branchConsistency @@ -225,6 +237,7 @@ steps: provider: gcp image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-2 + diskSizeGb: 250 - label: check-branch-protection-rules command: .buildkite/scripts/branch-protection.sh timeout_in_minutes: 5 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 32da1db652239..06e7ffbc8fb1c 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -11,6 +11,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.0.1 retry: @@ -30,6 +31,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.1.1 retry: @@ -49,6 +51,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.2.1 retry: @@ -68,6 +71,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.3.2 retry: @@ -87,6 +91,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.4.2 retry: @@ -106,6 +111,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.5.2 retry: @@ -125,6 +131,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.6.2 retry: @@ -144,6 +151,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.7.1 retry: @@ -163,6 +171,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.8.1 retry: @@ -182,6 +191,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.9.3 retry: @@ -201,6 +211,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.10.2 retry: @@ 
-220,6 +231,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.11.2 retry: @@ -239,6 +251,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.12.1 retry: @@ -258,6 +271,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.13.4 retry: @@ -277,6 +291,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.14.2 retry: @@ -296,6 +311,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.15.2 retry: @@ -315,6 +331,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.16.3 retry: @@ -334,6 +351,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 7.17.23 retry: @@ -353,6 +371,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.0.1 retry: @@ -372,6 +391,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.1.3 retry: @@ -391,6 +411,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.2.3 retry: @@ -410,6 +431,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.3.3 retry: @@ -429,6 +451,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.4.3 retry: @@ -448,6 +471,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.5.3 retry: @@ -467,6 +491,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.6.2 retry: @@ -486,6 +511,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.7.1 retry: @@ -505,6 +531,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.8.2 retry: @@ -524,6 +551,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.9.2 retry: @@ -543,6 +571,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.10.4 retry: @@ -562,6 +591,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.11.4 retry: @@ -581,6 +611,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.12.2 retry: @@ -600,6 +631,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.13.4 retry: @@ -619,6 +651,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.14.2 retry: @@ -638,6 +671,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk preemptible: true + diskSizeGb: 250 env: BWC_VERSION: 8.15.0 retry: @@ -672,6 +706,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: 
custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: example-plugins command: |- cd $$WORKSPACE/plugins/examples @@ -683,6 +718,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - group: java-fips-matrix steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-fips-matrix" @@ -704,6 +740,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" @@ -720,6 +757,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" BWC_VERSION: "{{matrix.BWC_VERSION}}" @@ -748,6 +786,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" @@ -768,6 +807,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" BWC_VERSION: "{{matrix.BWC_VERSION}}" @@ -803,6 +843,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: third-party / azure command: | export azure_storage_container=elasticsearch-ci-thirdparty @@ -817,6 +858,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: third-party / gcs command: | export google_storage_bucket=elasticsearch-ci-thirdparty @@ -831,6 +873,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: third-party / geoip command: | .ci/scripts/run-gradle.sh :modules:ingest-geoip:internalClusterTest -Dtests.jvm.argline="-Dgeoip_use_service=true" @@ -840,6 +883,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: third-party / s3 command: | export amazon_s3_bucket=elasticsearch-ci.us-west-2 @@ -854,6 +898,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 - label: Upload Snyk Dependency Graph command: .ci/scripts/run-gradle.sh uploadSnykDependencyGraph -PsnykTargetReference=$BUILDKITE_BRANCH env: @@ -864,6 +909,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk + diskSizeGb: 250 if: build.branch == "main" || build.branch == "7.17" - label: check-branch-consistency command: .ci/scripts/run-gradle.sh branchConsistency @@ -872,6 +918,7 @@ steps: provider: gcp image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-2 + diskSizeGb: 250 - label: check-branch-protection-rules command: .buildkite/scripts/branch-protection.sh timeout_in_minutes: 5 diff --git a/.buildkite/pipelines/pull-request/build-benchmark.yml b/.buildkite/pipelines/pull-request/build-benchmark.yml index 8d3215b8393ce..96330bee03638 100644 --- a/.buildkite/pipelines/pull-request/build-benchmark.yml +++ b/.buildkite/pipelines/pull-request/build-benchmark.yml @@ -22,3 +22,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git 
a/.buildkite/pipelines/pull-request/bwc-snapshots.yml b/.buildkite/pipelines/pull-request/bwc-snapshots.yml index 5a9fc2d938ac0..8f59e593b286f 100644 --- a/.buildkite/pipelines/pull-request/bwc-snapshots.yml +++ b/.buildkite/pipelines/pull-request/bwc-snapshots.yml @@ -18,3 +18,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/cloud-deploy.yml b/.buildkite/pipelines/pull-request/cloud-deploy.yml index ce8e8206d51ff..2932f874c5cf8 100644 --- a/.buildkite/pipelines/pull-request/cloud-deploy.yml +++ b/.buildkite/pipelines/pull-request/cloud-deploy.yml @@ -11,3 +11,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/docs-check.yml b/.buildkite/pipelines/pull-request/docs-check.yml index 2201eb2d1e4ea..3bf1e43697a7c 100644 --- a/.buildkite/pipelines/pull-request/docs-check.yml +++ b/.buildkite/pipelines/pull-request/docs-check.yml @@ -12,3 +12,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/eql-correctness.yml b/.buildkite/pipelines/pull-request/eql-correctness.yml index 8f7ca6942c0e9..d85827d10e886 100644 --- a/.buildkite/pipelines/pull-request/eql-correctness.yml +++ b/.buildkite/pipelines/pull-request/eql-correctness.yml @@ -7,3 +7,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/example-plugins.yml b/.buildkite/pipelines/pull-request/example-plugins.yml index 18d0de6594980..fb4a17fb214cb 100644 --- a/.buildkite/pipelines/pull-request/example-plugins.yml +++ b/.buildkite/pipelines/pull-request/example-plugins.yml @@ -16,3 +16,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/full-bwc.yml b/.buildkite/pipelines/pull-request/full-bwc.yml index d3fa8eccaf7d9..c404069bd0e60 100644 --- a/.buildkite/pipelines/pull-request/full-bwc.yml +++ b/.buildkite/pipelines/pull-request/full-bwc.yml @@ -13,3 +13,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/packaging-upgrade-tests.yml b/.buildkite/pipelines/pull-request/packaging-upgrade-tests.yml index c62cf23310422..970dafbb28647 100644 --- a/.buildkite/pipelines/pull-request/packaging-upgrade-tests.yml +++ b/.buildkite/pipelines/pull-request/packaging-upgrade-tests.yml @@ -18,5 +18,6 @@ steps: image: family/elasticsearch-{{matrix.image}} machineType: custom-16-32768 buildDirectory: /dev/shm/bk + diskSizeGb: 250 env: BWC_VERSION: $BWC_VERSION diff --git a/.buildkite/pipelines/pull-request/part-1-fips.yml b/.buildkite/pipelines/pull-request/part-1-fips.yml index 42f930c1bde9a..99544e7f5a80b 100644 --- a/.buildkite/pipelines/pull-request/part-1-fips.yml +++ b/.buildkite/pipelines/pull-request/part-1-fips.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-1.yml b/.buildkite/pipelines/pull-request/part-1.yml index 3d467c6c41e43..b4b9d5469ec41 100644 --- 
a/.buildkite/pipelines/pull-request/part-1.yml +++ b/.buildkite/pipelines/pull-request/part-1.yml @@ -7,3 +7,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-2-fips.yml b/.buildkite/pipelines/pull-request/part-2-fips.yml index 6a3647ceb50ae..36a9801547d78 100644 --- a/.buildkite/pipelines/pull-request/part-2-fips.yml +++ b/.buildkite/pipelines/pull-request/part-2-fips.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-2.yml b/.buildkite/pipelines/pull-request/part-2.yml index 43de69bbcd945..12bd78cf895fd 100644 --- a/.buildkite/pipelines/pull-request/part-2.yml +++ b/.buildkite/pipelines/pull-request/part-2.yml @@ -7,3 +7,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-3-fips.yml b/.buildkite/pipelines/pull-request/part-3-fips.yml index cee3ea153acb9..4a2df3026e782 100644 --- a/.buildkite/pipelines/pull-request/part-3-fips.yml +++ b/.buildkite/pipelines/pull-request/part-3-fips.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-3.yml b/.buildkite/pipelines/pull-request/part-3.yml index 12abae7634822..6991c05da85c6 100644 --- a/.buildkite/pipelines/pull-request/part-3.yml +++ b/.buildkite/pipelines/pull-request/part-3.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-4-fips.yml b/.buildkite/pipelines/pull-request/part-4-fips.yml index 11a50456ca4c0..734f8af816895 100644 --- a/.buildkite/pipelines/pull-request/part-4-fips.yml +++ b/.buildkite/pipelines/pull-request/part-4-fips.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-4.yml b/.buildkite/pipelines/pull-request/part-4.yml index af11f08953d07..59f2f2898a590 100644 --- a/.buildkite/pipelines/pull-request/part-4.yml +++ b/.buildkite/pipelines/pull-request/part-4.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-5-fips.yml b/.buildkite/pipelines/pull-request/part-5-fips.yml index 4e193ac751086..801b812bb99c0 100644 --- a/.buildkite/pipelines/pull-request/part-5-fips.yml +++ b/.buildkite/pipelines/pull-request/part-5-fips.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/part-5.yml b/.buildkite/pipelines/pull-request/part-5.yml index 306ce7533d0ed..c7e50631d1cdd 100644 --- a/.buildkite/pipelines/pull-request/part-5.yml +++ b/.buildkite/pipelines/pull-request/part-5.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/precommit.yml b/.buildkite/pipelines/pull-request/precommit.yml index 
f6548dfeed9b2..8d1458b1b60c8 100644 --- a/.buildkite/pipelines/pull-request/precommit.yml +++ b/.buildkite/pipelines/pull-request/precommit.yml @@ -10,3 +10,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/rest-compatibility.yml b/.buildkite/pipelines/pull-request/rest-compatibility.yml index a69810e23d960..16144a2a0780f 100644 --- a/.buildkite/pipelines/pull-request/rest-compatibility.yml +++ b/.buildkite/pipelines/pull-request/rest-compatibility.yml @@ -9,3 +9,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/.buildkite/pipelines/pull-request/validate-changelogs.yml b/.buildkite/pipelines/pull-request/validate-changelogs.yml index 9451d321a9b39..296ef11637118 100644 --- a/.buildkite/pipelines/pull-request/validate-changelogs.yml +++ b/.buildkite/pipelines/pull-request/validate-changelogs.yml @@ -7,3 +7,4 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + diskSizeGb: 250 diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index d25798ad071bd..0b62243e66afe 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -1139,7 +1139,7 @@ private void logFileContents(String description, Path from, boolean tailLogs) { } } if (foundLeaks) { - throw new TestClustersException("Found resource leaks in node logs."); + throw new TestClustersException("Found resource leaks in node log: " + from); } } diff --git a/docs/changelog/109636.yaml b/docs/changelog/109636.yaml new file mode 100644 index 0000000000000..f8f73a75dfd3d --- /dev/null +++ b/docs/changelog/109636.yaml @@ -0,0 +1,5 @@ +pr: 109636 +summary: "Ensure a lazy rollover request will roll over the target data stream once."
+area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/109657.yaml b/docs/changelog/109657.yaml new file mode 100644 index 0000000000000..35b315b7568c9 --- /dev/null +++ b/docs/changelog/109657.yaml @@ -0,0 +1,5 @@ +pr: 109657 +summary: Track `RequestedRangeNotSatisfiedException` separately in S3 Metrics +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/109779.yaml b/docs/changelog/109779.yaml new file mode 100644 index 0000000000000..4ccd8d475ec8d --- /dev/null +++ b/docs/changelog/109779.yaml @@ -0,0 +1,5 @@ +pr: 109779 +summary: Include component templates in retention validation +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/109781.yaml b/docs/changelog/109781.yaml new file mode 100644 index 0000000000000..df74645b53d84 --- /dev/null +++ b/docs/changelog/109781.yaml @@ -0,0 +1,5 @@ +pr: 109781 +summary: ES|QL Add primitive float variants of all aggregators to the compute engine +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/109848.yaml b/docs/changelog/109848.yaml new file mode 100644 index 0000000000000..858bbe84ef3a4 --- /dev/null +++ b/docs/changelog/109848.yaml @@ -0,0 +1,5 @@ +pr: 109848 +summary: Denser in-memory representation of `ShardBlobsToDelete` +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/109882.yaml b/docs/changelog/109882.yaml new file mode 100644 index 0000000000000..0f0fed01c5a7a --- /dev/null +++ b/docs/changelog/109882.yaml @@ -0,0 +1,5 @@ +pr: 109882 +summary: Support synthetic source together with `ignore_malformed` in histogram fields +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/109931.yaml b/docs/changelog/109931.yaml new file mode 100644 index 0000000000000..3575cfd49176f --- /dev/null +++ b/docs/changelog/109931.yaml @@ -0,0 +1,5 @@ +pr: 109931 +summary: Apply FLS to the contents of `IgnoredSourceFieldMapper` +area: Mapping +type: enhancement +issues: [] diff --git a/docs/reference/features/apis/reset-features-api.asciidoc b/docs/reference/features/apis/reset-features-api.asciidoc index d8ba0832cc2ad..2d2c7da039ea1 100644 --- a/docs/reference/features/apis/reset-features-api.asciidoc +++ b/docs/reference/features/apis/reset-features-api.asciidoc @@ -34,6 +34,11 @@ To list the features that will be affected, use the <> in their -default configuration. Synthetic `_source` cannot be used together with -<> or <>. +default configuration. Synthetic `_source` cannot be used together with <>. NOTE: To save space, zero-count buckets are not stored in the histogram doc values.
As a result, when indexing a histogram field in an index with synthetic source enabled, diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 369f3a9d42724..412410c4e4c7c 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -182,7 +182,7 @@ public void setup() throws Exception { public void testSnapshotAndRestore() throws Exception { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(REPO, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("ds") .setIncludeGlobalState(false) @@ -197,7 +197,7 @@ public void testSnapshotAndRestore() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, SNAPSHOT) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("ds") .get(); @@ -240,7 +240,7 @@ public void testSnapshotAndRestore() throws Exception { public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(REPO, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("ds") .setIncludeGlobalState(false) @@ -258,7 +258,7 @@ public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, SNAPSHOT) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("ds") .get(); @@ -290,7 +290,7 @@ public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception { public void testSnapshotAndRestoreInPlace() { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(REPO, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("ds") .setIncludeGlobalState(false) @@ -316,7 +316,7 @@ public void testSnapshotAndRestoreInPlace() { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, SNAPSHOT) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("ds") .get(); @@ -352,7 +352,7 @@ public void testSnapshotAndRestoreInPlace() { public void testFailureStoreSnapshotAndRestore() throws Exception { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(REPO, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("with-fs") .setIncludeGlobalState(false) @@ -368,7 +368,7 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, SNAPSHOT) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("with-fs") .get(); @@ -388,7 +388,7 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { // With rename pattern 
RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, SNAPSHOT) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("with-fs") .setRenamePattern("-fs") @@ -430,7 +430,7 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio } boolean filterDuringSnapshotting = randomBoolean(); - CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(REPO, SNAPSHOT); + CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT); createSnapshotRequest.waitForCompletion(true); if (filterDuringSnapshotting) { createSnapshotRequest.indices(dataStreamToSnapshot); @@ -453,7 +453,7 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN)); - RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(REPO, SNAPSHOT); + RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT); restoreSnapshotRequest.waitForCompletion(true); restoreSnapshotRequest.includeGlobalState(false); if (filterDuringSnapshotting == false) { @@ -499,7 +499,8 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio } public void testSnapshotAndRestoreReplaceAll() throws Exception { - var createSnapshotRequest = new CreateSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true).includeGlobalState(false); + var createSnapshotRequest = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) + .includeGlobalState(false); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().createSnapshot(createSnapshotRequest).actionGet(); RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); @@ -512,7 +513,8 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN)); - var restoreSnapshotRequest = new RestoreSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true).includeGlobalState(false); + var restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) + .includeGlobalState(false); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().restoreSnapshot(restoreSnapshotRequest).actionGet(); assertEquals(4, restoreSnapshotResponse.getRestoreInfo().successfulShards()); @@ -558,7 +560,8 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { } public void testSnapshotAndRestoreAll() throws Exception { - var createSnapshotRequest = new CreateSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true).includeGlobalState(false); + var createSnapshotRequest = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) + .includeGlobalState(false); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().createSnapshot(createSnapshotRequest).actionGet(); RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); @@ -571,7 +574,8 @@ public void 
testSnapshotAndRestoreAll() throws Exception { assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN)); - var restoreSnapshotRequest = new RestoreSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true).includeGlobalState(false); + var restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) + .includeGlobalState(false); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().restoreSnapshot(restoreSnapshotRequest).actionGet(); assertEquals(4, restoreSnapshotResponse.getRestoreInfo().successfulShards()); @@ -621,7 +625,8 @@ public void testSnapshotAndRestoreAll() throws Exception { } public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { - var createSnapshotRequest = new CreateSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true).includeGlobalState(false); + var createSnapshotRequest = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) + .includeGlobalState(false); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().createSnapshot(createSnapshotRequest).actionGet(); RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); @@ -634,7 +639,7 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN)); - var restoreSnapshotRequest = new RestoreSnapshotRequest(REPO, SNAPSHOT).waitForCompletion(true) + var restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) .includeGlobalState(false) .includeAliases(false); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().restoreSnapshot(restoreSnapshotRequest).actionGet(); @@ -669,7 +674,7 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { public void testRename() throws Exception { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(REPO, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("ds") .setIncludeGlobalState(false) @@ -680,12 +685,16 @@ public void testRename() throws Exception { expectThrows( SnapshotRestoreException.class, - client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds") + client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) + .setWaitForCompletion(true) + .setIndices("ds") ); client.admin() .cluster() - .prepareRestoreSnapshot(REPO, SNAPSHOT) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("ds") .setRenamePattern("ds") @@ -732,7 +741,7 @@ public void testRename() throws Exception { public void testRenameWriteDataStream() throws Exception { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(REPO, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("other-ds") .setIncludeGlobalState(false) @@ -743,7 +752,7 @@ public void testRenameWriteDataStream() throws Exception { client.admin() 
             .cluster()
-            .prepareRestoreSnapshot(REPO, SNAPSHOT)
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
             .setWaitForCompletion(true)
             .setIndices("other-ds")
             .setRenamePattern("other-ds")
@@ -786,7 +795,7 @@ public void testRenameWriteDataStream() throws Exception {
     public void testBackingIndexIsNotRenamedWhenRestoringDataStream() {
         CreateSnapshotResponse createSnapshotResponse = client.admin()
             .cluster()
-            .prepareCreateSnapshot(REPO, SNAPSHOT)
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
             .setWaitForCompletion(true)
             .setIndices("ds")
             .setIncludeGlobalState(false)
@@ -797,7 +806,11 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() {
         expectThrows(
             SnapshotRestoreException.class,
-            client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds")
+            client.admin()
+                .cluster()
+                .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
+                .setWaitForCompletion(true)
+                .setIndices("ds")
         );
         // delete data stream
@@ -806,7 +819,7 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() {
         // restore data stream attempting to rename the backing index
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, SNAPSHOT)
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
             .setWaitForCompletion(true)
             .setIndices("ds")
             .setRenamePattern(dsBackingIndexName)
@@ -823,7 +836,7 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() {
     public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() {
         CreateSnapshotResponse createSnapshotResponse = client.admin()
             .cluster()
-            .prepareCreateSnapshot(REPO, SNAPSHOT)
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
             .setWaitForCompletion(true)
             .setIndices("ds")
             .setIncludeGlobalState(false)
@@ -834,13 +847,17 @@ public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() {
         expectThrows(
             SnapshotRestoreException.class,
-            client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds")
+            client.admin()
+                .cluster()
+                .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
+                .setWaitForCompletion(true)
+                .setIndices("ds")
         );
         // restore data stream attempting to rename the backing index
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, SNAPSHOT)
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
             .setWaitForCompletion(true)
             .setIndices("ds")
             .setRenamePattern("(.+)")
@@ -866,7 +883,7 @@ public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() {
     public void testWildcards() throws Exception {
         CreateSnapshotResponse createSnapshotResponse = client.admin()
             .cluster()
-            .prepareCreateSnapshot(REPO, "snap2")
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, "snap2")
             .setWaitForCompletion(true)
             .setIndices("d*")
             .setIncludeGlobalState(false)
@@ -877,7 +894,7 @@ public void testWildcards() throws Exception {
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, "snap2")
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, "snap2")
             .setWaitForCompletion(true)
             .setIndices("d*")
             .setRenamePattern("ds")
@@ -903,7 +920,7 @@ public void testWildcards() throws Exception {
     public void testDataStreamNotStoredWhenIndexRequested() {
         CreateSnapshotResponse createSnapshotResponse = client.admin()
             .cluster()
-            .prepareCreateSnapshot(REPO, "snap2")
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, "snap2")
             .setWaitForCompletion(true)
             .setIndices(dsBackingIndexName)
             .setIncludeGlobalState(false)
@@ -913,14 +930,14 @@ public void testDataStreamNotStoredWhenIndexRequested() {
         assertEquals(RestStatus.OK, status);
         expectThrows(
             Exception.class,
-            client.admin().cluster().prepareRestoreSnapshot(REPO, "snap2").setWaitForCompletion(true).setIndices("ds")
+            client.admin().cluster().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, "snap2").setWaitForCompletion(true).setIndices("ds")
         );
     }
     public void testDataStreamNotRestoredWhenIndexRequested() throws Exception {
         CreateSnapshotResponse createSnapshotResponse = client.admin()
             .cluster()
-            .prepareCreateSnapshot(REPO, "snap2")
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, "snap2")
             .setWaitForCompletion(true)
             .setIndices("ds")
             .setIncludeGlobalState(false)
@@ -933,7 +950,7 @@ public void testDataStreamNotRestoredWhenIndexRequested() throws Exception {
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, "snap2")
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, "snap2")
             .setWaitForCompletion(true)
             .setIndices(".ds-ds-*")
             .get();
@@ -948,7 +965,7 @@ public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionExcepti
         final String snapshotName = "test-snap";
         CreateSnapshotResponse createSnapshotResponse = client.admin()
             .cluster()
-            .prepareCreateSnapshot(REPO, snapshotName)
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName)
             .setWaitForCompletion(true)
             .setIndices("does-not-exist-*")
             .setIncludeGlobalState(true)
@@ -957,7 +974,11 @@ public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionExcepti
         assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })));
-        final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO, snapshotName).get();
+        final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
+            TEST_REQUEST_TIMEOUT,
+            REPO,
+            snapshotName
+        ).get();
         assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), empty());
     }
@@ -965,7 +986,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception {
         Client client1 = client();
         // this test uses a MockRepository
-        assertAcked(clusterAdmin().prepareDeleteRepository(REPO));
+        assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPO));
         final String repositoryName = "test-repo";
         createRepository(
@@ -1000,7 +1021,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception {
         logger.info("--> snapshot");
         ActionFuture<CreateSnapshotResponse> future = client1.admin()
             .cluster()
-            .prepareCreateSnapshot(repositoryName, SNAPSHOT)
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, SNAPSHOT)
             .setIndices(dataStream)
             .setWaitForCompletion(true)
             .setPartial(false)
@@ -1036,13 +1057,16 @@ public void testCloneSnapshotThatIncludesDataStream() throws Exception {
         assertSuccessful(
             client.admin()
                 .cluster()
-                .prepareCreateSnapshot(REPO, sourceSnapshotName)
+                .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, sourceSnapshotName)
                 .setWaitForCompletion(true)
                 .setIndices("ds", indexWithoutDataStream)
                 .setIncludeGlobalState(false)
                 .execute()
         );
-        assertAcked(clusterAdmin().prepareCloneSnapshot(REPO, sourceSnapshotName, "target-snapshot-1").setIndices(indexWithoutDataStream));
+        assertAcked(
+            clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, REPO, sourceSnapshotName, "target-snapshot-1")
+                .setIndices(indexWithoutDataStream)
+        );
     }
     public void testPartialRestoreSnapshotThatIncludesDataStream() {
@@ -1053,7 +1077,7 @@ public void testPartialRestoreSnapshotThatIncludesDataStream() {
         assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream));
         RestoreInfo restoreInfo = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, snapshot)
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshot)
             .setIndices(indexWithoutDataStream)
             .setWaitForCompletion(true)
             .setRestoreGlobalState(false)
@@ -1078,7 +1102,7 @@ public void testPartialRestoreSnapshotThatIncludesDataStreamWithGlobalState() {
         assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream));
         RestoreInfo restoreInfo = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, snapshot)
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshot)
             .setIndices(indexWithoutDataStream)
             .setWaitForCompletion(true)
             .setRestoreGlobalState(true)
@@ -1096,11 +1120,11 @@ public void testSnapshotDSDuringRollover() throws Exception {
         final boolean partial = randomBoolean();
         blockAllDataNodes(repoName);
         final String snapshotName = "ds-snap";
-        final ActionFuture<CreateSnapshotResponse> snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName)
-            .setWaitForCompletion(true)
-            .setPartial(partial)
-            .setIncludeGlobalState(randomBoolean())
-            .execute();
+        final ActionFuture<CreateSnapshotResponse> snapshotFuture = clusterAdmin().prepareCreateSnapshot(
+            TEST_REQUEST_TIMEOUT,
+            repoName,
+            snapshotName
+        ).setWaitForCompletion(true).setPartial(partial).setIncludeGlobalState(randomBoolean()).execute();
         waitForBlockOnAnyDataNode(repoName);
         awaitNumberOfSnapshotsInProgress(1);
         final ActionFuture<RolloverResponse> rolloverResponse = indicesAdmin().rolloverIndex(new RolloverRequest("ds", null));
@@ -1117,7 +1141,7 @@
         assertThat(snapshotInfo.dataStreams(), hasItems("ds"));
         assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" })).get());
-        RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName)
+        RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)
             .setWaitForCompletion(true)
             .setIndices("ds")
             .get()
@@ -1134,11 +1158,11 @@ public void testSnapshotDSDuringRolloverAndDeleteOldIndex() throws Exception {
         createRepository(repoName, "mock");
         blockAllDataNodes(repoName);
         final String snapshotName = "ds-snap";
-        final ActionFuture<CreateSnapshotResponse> snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName)
-            .setWaitForCompletion(true)
-            .setPartial(true)
-            .setIncludeGlobalState(randomBoolean())
-            .execute();
+        final ActionFuture<CreateSnapshotResponse> snapshotFuture = clusterAdmin().prepareCreateSnapshot(
+            TEST_REQUEST_TIMEOUT,
+            repoName,
+            snapshotName
+        ).setWaitForCompletion(true).setPartial(true).setIncludeGlobalState(randomBoolean()).execute();
         waitForBlockOnAnyDataNode(repoName);
         awaitNumberOfSnapshotsInProgress(1);
         final RolloverResponse rolloverResponse = indicesAdmin().rolloverIndex(new RolloverRequest("ds", null)).get();
@@ -1157,7 +1181,7 @@
         );
         assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "other-ds" })));
-        RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName)
+        RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)
             .setWaitForCompletion(true)
             .setIndices("other-ds")
             .get()
@@ -1176,7 +1200,7 @@ public void testExcludeDSFromSnapshotWhenExcludingItsIndices() {
         assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream));
         RestoreInfo restoreInfo = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, snapshot)
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshot)
             .setWaitForCompletion(true)
             .setRestoreGlobalState(false)
             .get()
@@ -1201,7 +1225,7 @@ public void testExcludeDSFromSnapshotWhenExcludingItsIndicesWithGlobalState() {
         assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream));
         RestoreInfo restoreInfo = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, snapshot)
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshot)
             .setWaitForCompletion(true)
             .setRestoreGlobalState(true)
             .get()
@@ -1221,7 +1245,7 @@ public void testRestoreSnapshotFully() throws Exception {
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
-            .prepareRestoreSnapshot(REPO, snapshotName)
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName)
             .setWaitForCompletion(true)
             .get();
         assertEquals(RestStatus.OK, restoreSnapshotResponse.status());
@@ -1241,7 +1265,7 @@ public void testRestoreDataStreamAliasWithConflictingDataStream() throws Excepti
             assertAcked(client.execute(CreateDataStreamAction.INSTANCE, request).actionGet());
             var e = expectThrows(
                 IllegalStateException.class,
-                client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true)
+                client.admin().cluster().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName).setWaitForCompletion(true)
             );
             assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)"));
         } finally {
@@ -1264,7 +1288,7 @@ public void testRestoreDataStreamAliasWithConflictingIndicesAlias() throws Excep
             var e = expectThrows(
                 IllegalStateException.class,
-                client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true)
+                client.admin().cluster().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName).setWaitForCompletion(true)
             );
             assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (my-alias)"));
         }
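Note: every hunk in the file above makes the same mechanical change: builder overloads that used to default the master-node timeout now require it as the first argument. A minimal sketch of the resulting call shape, assuming the ESIntegTestCase scaffolding (clusterAdmin() and the TEST_REQUEST_TIMEOUT constant) that these tests already use; illustrative only, not compilable outside the Elasticsearch test framework:

    // Snapshot builders take (masterNodeTimeout, repository, snapshot).
    CreateSnapshotResponse created = clusterAdmin()
        .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "my-repo", "my-snap")
        .setWaitForCompletion(true)
        .get();
    // Restore follows the same shape.
    RestoreSnapshotResponse restored = clusterAdmin()
        .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "my-repo", "my-snap")
        .setWaitForCompletion(true)
        .get();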
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java
new file mode 100644
index 0000000000000..89d576e74be2f
--- /dev/null
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.datastreams;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.admin.indices.rollover.RolloverRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
+import org.elasticsearch.action.datastreams.CreateDataStreamAction;
+import org.elasticsearch.action.datastreams.GetDataStreamAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.disruption.IntermittentLongGCDisruption;
+import org.elasticsearch.test.disruption.SingleNodeDisruption;
+import org.elasticsearch.xcontent.XContentType;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
+public class LazyRolloverDuringDisruptionIT extends ESIntegTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(DataStreamsPlugin.class);
+    }
+
+    public void testRolloverIsExecutedOnce() throws ExecutionException, InterruptedException {
+        String masterNode = internalCluster().startMasterOnlyNode();
+        internalCluster().startDataOnlyNodes(3);
+        ensureStableCluster(4);
+
+        String dataStreamName = "my-data-stream";
+        createDataStream(dataStreamName);
+
+        // Mark it to lazy rollover
+        new RolloverRequestBuilder(client()).setRolloverTarget(dataStreamName).lazy(true).execute().get();
+
+        // Verify that the data stream is marked for rollover and that it has currently one index
+        DataStream dataStream = getDataStream(dataStreamName);
+        assertThat(dataStream.rolloverOnWrite(), equalTo(true));
+        assertThat(dataStream.getBackingIndices().getIndices().size(), equalTo(1));
+
+        // Introduce a disruption to the master node that should delay the rollover execution
+        SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(random(), masterNode, 100, 200, 30000, 60000);
+        internalCluster().setDisruptionScheme(masterNodeDisruption);
+        masterNodeDisruption.startDisrupting();
+
+        // Start indexing operations
+        int docs = randomIntBetween(5, 10);
+        CountDownLatch countDownLatch = new CountDownLatch(docs);
+        for (int i = 0; i < docs; i++) {
+            var indexRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE);
+            final String doc = "{ \"@timestamp\": \"2099-05-06T16:21:15.000Z\", \"message\": \"something cool happened\" }";
+            indexRequest.source(doc, XContentType.JSON);
+            client().index(indexRequest, new ActionListener<>() {
+                @Override
+                public void onResponse(DocWriteResponse docWriteResponse) {
+                    countDownLatch.countDown();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    fail("Indexing request should have succeeded eventually, failed with " + e.getMessage());
+                }
+            });
+        }
+
+        // End the disruption so that all pending tasks will complete
+        masterNodeDisruption.stopDisrupting();
+
+        // Wait for all the indexing requests to be processed successfully
+        countDownLatch.await();
+
+        // Verify that the rollover has happened once
+        dataStream = getDataStream(dataStreamName);
+        assertThat(dataStream.rolloverOnWrite(), equalTo(false));
+        assertThat(dataStream.getBackingIndices().getIndices().size(), equalTo(2));
+    }
+
+    private DataStream getDataStream(String dataStreamName) {
+        return client().execute(GetDataStreamAction.INSTANCE, new GetDataStreamAction.Request(new String[] { dataStreamName }))
+            .actionGet()
+            .getDataStreams()
+            .get(0)
+            .getDataStream();
+    }
+
+    private void createDataStream(String dataStreamName) throws InterruptedException, ExecutionException {
+        final TransportPutComposableIndexTemplateAction.Request putComposableTemplateRequest =
+            new TransportPutComposableIndexTemplateAction.Request("my-template");
+        putComposableTemplateRequest.indexTemplate(
+            ComposableIndexTemplate.builder()
+                .indexPatterns(List.of(dataStreamName))
+                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false))
+                .build()
+        );
+        final AcknowledgedResponse putComposableTemplateResponse = client().execute(
+            TransportPutComposableIndexTemplateAction.TYPE,
+            putComposableTemplateRequest
+        ).actionGet();
+        assertThat(putComposableTemplateResponse.isAcknowledged(), is(true));
+
+        final CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
+        final AcknowledgedResponse createDataStreamResponse = client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest)
+            .get();
+        assertThat(createDataStreamResponse.isAcknowledged(), is(true));
+    }
+}
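Note: the new test above blocks on countDownLatch.await() with no bound, so a lost indexing response would hang until the suite-level timeout. A bounded wait (plain java.util.concurrent, nothing Elasticsearch-specific) is a possible hardening; whether it is preferable here is a judgment call:

    // Hypothetical variant: fail fast if the indexing responses never arrive.
    if (countDownLatch.await(2, java.util.concurrent.TimeUnit.MINUTES) == false) {
        throw new AssertionError("timed out waiting for indexing requests to complete");
    }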
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java
index 698656dfa7406..c147677cf856c 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java
@@ -78,7 +78,10 @@ public void testSystemDataStreamInGlobalState() throws Exception {
         }
         assertSuccessful(
-            clusterAdmin().prepareCreateSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIncludeGlobalState(true).execute()
+            clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
+                .setWaitForCompletion(true)
+                .setIncludeGlobalState(true)
+                .execute()
         );
         // We have to delete the data stream directly, as the feature reset API doesn't clean up system data streams yet
@@ -98,7 +101,7 @@ public void testSystemDataStreamInGlobalState() throws Exception {
         // Make sure requesting the data stream by name throws.
         // For some reason, expectThrows() isn't working for me here, hence the try/catch.
         try {
-            clusterAdmin().prepareRestoreSnapshot(REPO, SNAPSHOT)
+            clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
                 .setIndices(".test-data-stream")
                 .setWaitForCompletion(true)
                 .setRestoreGlobalState(randomBoolean()) // this shouldn't matter
@@ -117,7 +120,7 @@ public void testSystemDataStreamInGlobalState() throws Exception {
         assertSystemDataStreamDoesNotExist();
         // Now actually restore the data stream
-        RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO, SNAPSHOT)
+        RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
             .setWaitForCompletion(true)
             .setRestoreGlobalState(true)
             .get();
@@ -132,7 +135,10 @@ public void testSystemDataStreamInGlobalState() throws Exception {
         // Attempting to restore again without specifying indices or global/feature states should work, as the wildcard should not be
         // resolved to system indices/data streams.
-        clusterAdmin().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setRestoreGlobalState(false).get();
+        clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
+            .setWaitForCompletion(true)
+            .setRestoreGlobalState(false)
+            .get();
         assertEquals(restoreSnapshotResponse.getRestoreInfo().totalShards(), restoreSnapshotResponse.getRestoreInfo().successfulShards());
     }
@@ -182,7 +188,7 @@ public void testSystemDataStreamInFeatureState() throws Exception {
         }
         SnapshotInfo snapshotInfo = assertSuccessful(
-            clusterAdmin().prepareCreateSnapshot(REPO, SNAPSHOT)
+            clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
                 .setIndices("my-index")
                 .setFeatureStates(SystemDataStreamTestPlugin.class.getSimpleName())
                 .setWaitForCompletion(true)
@@ -207,7 +213,7 @@ public void testSystemDataStreamInFeatureState() throws Exception {
             assertThat(indicesRemaining.indices(), arrayWithSize(0));
         }
-        RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO, SNAPSHOT)
+        RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
            .setWaitForCompletion(true)
            .setIndices("my-index")
            .setFeatureStates(SystemDataStreamTestPlugin.class.getSimpleName())
diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java
index d9ab689c05a5c..2fc728a4fae34 100644
--- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java
+++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java
@@ -113,7 +113,11 @@ protected SecureSettings credentials() {
     @Override
     protected void createRepository(String repoName) {
-        AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName)
+        AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(
+            TEST_REQUEST_TIMEOUT,
+            TEST_REQUEST_TIMEOUT,
+            repoName
+        )
             .setType("azure")
             .setSettings(
                 Settings.builder()
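Note: the repository builders differ from the snapshot builders in arity: put/delete/verify-repository requests are acknowledged master-node requests, so they take two timeouts (in this codebase, master-node timeout then ack timeout), while the snapshot operations take one. A hedged sketch of the distinction, reusing the test scaffolding assumed above:

    // Acknowledged repository request: (masterNodeTimeout, ackTimeout, name).
    assertAcked(
        clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-repo")
            .setType("fs")
            .setSettings(Settings.builder().put("location", randomRepoPath()))
    );
    // Snapshot request: (masterNodeTimeout, repository, snapshot).
    clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "my-repo", "my-snap").get();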
diff --git a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java
index b2df41c69eda7..4afa6f2a10b5c 100644
--- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java
+++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java
@@ -71,7 +71,11 @@ protected SecureSettings credentials() {
     @Override
     protected void createRepository(final String repoName) {
-        AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName)
+        AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(
+            TEST_REQUEST_TIMEOUT,
+            TEST_REQUEST_TIMEOUT,
+            repoName
+        )
             .setType("gcs")
             .setSettings(
                 Settings.builder()
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java
index f8503bca3ec67..640293ecb80b0 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
-import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.blobstore.OperationPurpose;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.collect.Iterators;
@@ -23,6 +22,7 @@
 import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+import org.elasticsearch.repositories.blobstore.RequestedRangeNotSatisfiedException;
 import org.elasticsearch.repositories.s3.S3BlobStore.Operation;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.telemetry.Measurement;
@@ -39,6 +39,7 @@
 import static org.elasticsearch.repositories.RepositoriesMetrics.HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM;
 import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_HISTOGRAM;
+import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL;
 import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_TOTAL;
 import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_OPERATIONS_TOTAL;
 import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_TOTAL;
@@ -47,8 +48,10 @@
 import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL;
 import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
+import static org.elasticsearch.rest.RestStatus.REQUESTED_RANGE_NOT_SATISFIED;
 import static org.elasticsearch.rest.RestStatus.TOO_MANY_REQUESTS;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
 @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint")
 // Need to set up a new cluster for each test because cluster settings use randomized authentication settings
@@ -80,22 +83,29 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
             .build();
     }
-    public void testMetricsWithErrors() throws IOException {
-        final String repository = createRepository(randomRepositoryName());
-
-        final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData);
-        final var blobStoreRepository = (BlobStoreRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName)
-            .repository(repository);
-        final BlobStore blobStore = blobStoreRepository.blobStore();
-        final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNodeName)
+    private static TestTelemetryPlugin getPlugin(String dataNodeName) {
+        var plugin = internalCluster().getInstance(PluginsService.class, dataNodeName)
             .filterPlugins(TestTelemetryPlugin.class)
             .findFirst()
             .orElseThrow();
-        plugin.resetMeter();
+        return plugin;
+    }
+
+    private static BlobContainer getBlobContainer(String dataNodeName, String repository) {
+        final var blobStoreRepository = (BlobStoreRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName)
+            .repository(repository);
+        return blobStoreRepository.blobStore().blobContainer(BlobPath.EMPTY.add(randomIdentifier()));
+    }
+
+    public void testMetricsWithErrors() throws IOException {
+        final String repository = createRepository(randomRepositoryName());
+
+        final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData);
+        final TestTelemetryPlugin plugin = getPlugin(dataNodeName);
         final OperationPurpose purpose = randomFrom(OperationPurpose.values());
-        final BlobContainer blobContainer = blobStore.blobContainer(BlobPath.EMPTY.add(randomIdentifier()));
+        final BlobContainer blobContainer = getBlobContainer(dataNodeName, repository);
         final String blobName = randomIdentifier();
         // Put a blob
@@ -132,6 +142,9 @@ public void testMetricsWithErrors() throws IOException {
             assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
             assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
             assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
+
+            // Make sure we don't hit the request range not satisfied counters
+            assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL, Operation.GET_OBJECT), equalTo(0L));
         }
         // List retry exhausted
@@ -166,6 +179,39 @@ public void testMetricsWithErrors() throws IOException {
         assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(1L));
     }
+    public void testMetricsForRequestRangeNotSatisfied() {
+        final String repository = createRepository(randomRepositoryName());
+        final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData);
+        final BlobContainer blobContainer = getBlobContainer(dataNodeName, repository);
+        final TestTelemetryPlugin plugin = getPlugin(dataNodeName);
+
+        final OperationPurpose purpose = randomFrom(OperationPurpose.values());
+        final String blobName = randomIdentifier();
+
+        for (int i = 0; i < randomIntBetween(1, 3); i++) {
+            final long batch = i + 1;
+            addErrorStatus(TOO_MANY_REQUESTS, TOO_MANY_REQUESTS, REQUESTED_RANGE_NOT_SATISFIED);
+            try {
+                blobContainer.readBlob(purpose, blobName).close();
+            } catch (Exception e) {
+                assertThat(e, instanceOf(RequestedRangeNotSatisfiedException.class));
+            }
+
+            assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.GET_OBJECT), equalTo(3 * batch));
+            assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
+            assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
+            assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch));
+            assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
+            assertThat(
+                getLongCounterValue(plugin, METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL, Operation.GET_OBJECT),
+                equalTo(batch)
+            );
+            assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.GET_OBJECT), equalTo(2 * batch));
+            assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(2 * batch));
+            assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch));
+        }
+    }
+
     private void addErrorStatus(RestStatus... statuses) {
         errorStatusQueue.addAll(Arrays.asList(statuses));
     }
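Note: the counter arithmetic in testMetricsForRequestRangeNotSatisfied follows from the stubbed response queue: each iteration enqueues two 429s and one 416 for a single logical read, and the counters are cumulative. Spelled out (plain Java, hypothetical local names):

    // After i iterations (batch = i), a single readBlob() per iteration has issued:
    long requests  = 3 * batch; // each stubbed status is one HTTP request
    long throttles = 2 * batch; // the two 429 (TOO_MANY_REQUESTS) responses
    long range416  = 1 * batch; // the final 416 aborts the read and is counted once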
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
index 88f0e01db3e6a..c97e26651d4ee 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
@@ -200,8 +200,8 @@ public void testAbortRequestStats() throws Exception {
         // Intentionally fail snapshot to trigger abortMultipartUpload requests
         shouldFailCompleteMultipartUploadRequest.set(true);
         final String snapshot = "snapshot";
-        clusterAdmin().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index).get();
-        clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get();
+        clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true).setIndices(index).get();
+        clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).get();
         final RepositoryStats repositoryStats = StreamSupport.stream(
             internalCluster().getInstances(RepositoriesService.class).spliterator(),
@@ -242,12 +242,16 @@ public void testMetrics() throws Exception {
         assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs);
         final String snapshot = "snapshot";
-        assertSuccessfulSnapshot(clusterAdmin().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index));
+        assertSuccessfulSnapshot(
+            clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true).setIndices(index)
+        );
         assertAcked(client().admin().indices().prepareDelete(index));
-        assertSuccessfulRestore(clusterAdmin().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true));
+        assertSuccessfulRestore(
+            clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true)
+        );
         ensureGreen(index);
         assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs);
-        assertAcked(clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get());
+        assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).get());
         final Map aggregatedMetrics = new HashMap<>();
         // Compare collected stats and metrics for each node and they should be the same
@@ -389,7 +393,7 @@ public void testEnforcedCooldownPeriod() throws IOException {
             true
         );
-        final SnapshotId fakeOldSnapshot = clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-old")
+        final SnapshotId fakeOldSnapshot = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-old")
             .setWaitForCompletion(true)
             .setIndices()
             .get()
@@ -434,15 +438,15 @@ public void testEnforcedCooldownPeriod() throws IOException {
         final String newSnapshotName = "snapshot-new";
         final long beforeThrottledSnapshot = repository.threadPool().relativeTimeInNanos();
-        clusterAdmin().prepareCreateSnapshot(repoName, newSnapshotName).setWaitForCompletion(true).setIndices().get();
+        clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, newSnapshotName).setWaitForCompletion(true).setIndices().get();
         assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledSnapshot, greaterThan(TEST_COOLDOWN_PERIOD.getNanos()));
         final long beforeThrottledDelete = repository.threadPool().relativeTimeInNanos();
-        clusterAdmin().prepareDeleteSnapshot(repoName, newSnapshotName).get();
+        clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, newSnapshotName).get();
         assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledDelete, greaterThan(TEST_COOLDOWN_PERIOD.getNanos()));
         final long beforeFastDelete = repository.threadPool().relativeTimeInNanos();
-        clusterAdmin().prepareDeleteSnapshot(repoName, fakeOldSnapshot.getName()).get();
+        clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, fakeOldSnapshot.getName()).get();
         assertThat(repository.threadPool().relativeTimeInNanos() - beforeFastDelete, lessThan(TEST_COOLDOWN_PERIOD.getNanos()));
     }
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
index 5064910723ab6..2359176abf715 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
@@ -127,10 +127,11 @@ protected void createRepository(String repoName) {
                 settings.put("storage_class", storageClass);
             }
         }
-        AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName)
-            .setType("s3")
-            .setSettings(settings)
-            .get();
+        AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(
+            TEST_REQUEST_TIMEOUT,
+            TEST_REQUEST_TIMEOUT,
+            repoName
+        ).setType("s3").setSettings(settings).get();
         assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
     }
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
index 2aff610dc82e9..5af53364fb765 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
@@ -52,6 +52,7 @@
 import java.util.stream.Collectors;
 import static org.elasticsearch.core.Strings.format;
+import static org.elasticsearch.rest.RestStatus.REQUESTED_RANGE_NOT_SATISFIED;
 class S3BlobStore implements BlobStore {
@@ -177,6 +178,23 @@ public final void collectMetrics(Request request, Response response) {
                 .map(List::size)
                 .orElse(0);
+            if (exceptionCount > 0) {
+                final List statusCodes = Objects.requireNonNullElse(
+                    awsRequestMetrics.getProperty(AWSRequestMetrics.Field.StatusCode),
+                    List.of()
+                );
+                // REQUESTED_RANGE_NOT_SATISFIED errors are expected errors due to RCO
+                // TODO Add more expected client error codes?
+                final long amountOfRequestRangeNotSatisfiedErrors = statusCodes.stream()
+                    .filter(e -> (Integer) e == REQUESTED_RANGE_NOT_SATISFIED.getStatus())
+                    .count();
+                if (amountOfRequestRangeNotSatisfiedErrors > 0) {
+                    s3RepositoriesMetrics.common()
+                        .requestRangeNotSatisfiedExceptionCounter()
+                        .incrementBy(amountOfRequestRangeNotSatisfiedErrors, attributes);
+                }
+            }
+
             s3RepositoriesMetrics.common().operationCounter().incrementBy(1, attributes);
             if (numberOfAwsErrors == requestCount) {
                 s3RepositoriesMetrics.common().unsuccessfulOperationCounter().incrementBy(1, attributes);
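Note: the production change above scans the SDK's per-request metrics for 416 status codes and feeds a dedicated counter, treating range-not-satisfied as an expected error class rather than a repository fault. A stripped-down, self-contained sketch of the same counting step (the AWSRequestMetrics plumbing is elided; 416 is RestStatus.REQUESTED_RANGE_NOT_SATISFIED):

    import java.util.List;

    class RangeErrorCount {
        // Count HTTP 416 responses among the status codes seen for one operation.
        static long countRangeNotSatisfied(List<Integer> statusCodes) {
            return statusCodes.stream().filter(status -> status == 416).count();
        }

        public static void main(String[] args) {
            System.out.println(countRangeNotSatisfied(List.of(429, 429, 416))); // prints 1
        }
    }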
diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java
index 9a1d12fab0af5..e59b3e8f90620 100644
--- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java
+++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java
@@ -244,7 +244,12 @@ public void sendResponse(RestResponse response) {
     }
     private void createRepository(final String name, final Settings repositorySettings) {
-        assertAcked(clusterAdmin().preparePutRepository(name).setType(S3Repository.TYPE).setVerify(false).setSettings(repositorySettings));
+        assertAcked(
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name)
+                .setType(S3Repository.TYPE)
+                .setVerify(false)
+                .setSettings(repositorySettings)
+        );
     }
     /**
diff --git a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java
index 80798d931e93f..335da9123ed5a 100644
--- a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java
+++ b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java
@@ -46,7 +46,7 @@ public void testUrlRepository() throws Exception {
         assertAcked(
             client.admin()
                 .cluster()
-                .preparePutRepository("test-repo")
+                .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType(FsRepository.TYPE)
                 .setSettings(
                     Settings.builder()
@@ -69,7 +69,7 @@ public void testUrlRepository() throws Exception {
         logger.info("--> snapshot");
         CreateSnapshotResponse createSnapshotResponse = client.admin()
             .cluster()
-            .prepareCreateSnapshot("test-repo", "test-snap")
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap")
             .setWaitForCompletion(true)
             .setIndices("test-idx")
             .get();
@@ -79,7 +79,7 @@ public void testUrlRepository() throws Exception {
         SnapshotState state = client.admin()
             .cluster()
-            .prepareGetSnapshots("test-repo")
+            .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo")
             .setSnapshots("test-snap")
             .get()
             .getSnapshots()
@@ -94,7 +94,7 @@ public void testUrlRepository() throws Exception {
         assertAcked(
             client.admin()
                 .cluster()
-                .preparePutRepository("url-repo")
+                .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "url-repo")
                 .setType(URLRepository.TYPE)
                 .setSettings(
                     Settings.builder()
@@ -105,7 +105,7 @@ public void testUrlRepository() throws Exception {
         logger.info("--> restore index after deletion");
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
-            .prepareRestoreSnapshot("url-repo", "test-snap")
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "url-repo", "test-snap")
             .setWaitForCompletion(true)
             .setIndices("test-idx")
             .get();
@@ -114,15 +114,18 @@ public void testUrlRepository() throws Exception {
         assertHitCount(client.prepareSearch("test-idx").setSize(0), 100);
         logger.info("--> list available snapshots");
-        GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
+        GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "url-repo").get();
         assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
         logger.info("--> delete snapshot");
-        AcknowledgedResponse deleteSnapshotResponse = client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
+        AcknowledgedResponse deleteSnapshotResponse = client.admin()
+            .cluster()
+            .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap")
+            .get();
         assertAcked(deleteSnapshotResponse);
         logger.info("--> list available snapshots again, no snapshots should be returned");
-        getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
+        getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "url-repo").get();
         assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
     }
@@ -130,7 +133,7 @@ public void testUrlRepositoryPermitsShutdown() throws Exception {
         assertAcked(
             client().admin()
                 .cluster()
-                .preparePutRepository("url-repo")
+                .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "url-repo")
                 .setType(URLRepository.TYPE)
                 .setVerify(false)
                 .setSettings(Settings.builder().put(URLRepository.URL_SETTING.getKey(), "http://localhost/"))
diff --git a/muted-tests.yml b/muted-tests.yml
index 9b58a9446b3ca..a17e95e9a5b3f 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -67,9 +67,6 @@ tests:
 - class: "org.elasticsearch.xpack.shutdown.NodeShutdownReadinessIT"
   issue: "https://github.com/elastic/elasticsearch/issues/109838"
   method: "testShutdownReadinessService"
-- class: "org.elasticsearch.packaging.test.RpmPreservationTests"
-  issue: "https://github.com/elastic/elasticsearch/issues/109898"
-  method: "test30PreserveConfig"
 - class: "org.elasticsearch.xpack.security.ScrollHelperIntegTests"
   issue: "https://github.com/elastic/elasticsearch/issues/109905"
   method: "testFetchAllEntities"
@@ -78,6 +75,8 @@ tests:
 - class: "org.elasticsearch.xpack.esql.action.AsyncEsqlQueryActionIT"
   issue: "https://github.com/elastic/elasticsearch/issues/109944"
   method: "testBasicAsyncExecution"
+- class: "org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests"
+  issue: "https://github.com/elastic/elasticsearch/issues/110015"
 # Examples:
diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java
index a6d2bdcf8a1d4..6726929bdc91c 100644
--- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java
@@ -39,7 +39,7 @@ protected SecureSettings credentials() {
     @Override
     protected void createRepository(String repoName) {
         assertAcked(
-            clusterAdmin().preparePutRepository(repoName)
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)
                 .setType("hdfs")
                 .setSettings(
                     Settings.builder()
diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java
index 0e2ec25b6cfaa..081c6c26319ab 100644
--- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java
@@ -44,7 +44,7 @@ public void testSimpleWorkflow() {
         assertAcked(
             client.admin()
                 .cluster()
-                .preparePutRepository("test-repo")
+                .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(
                     Settings.builder()
@@ -75,7 +75,7 @@ public void testSimpleWorkflow() {
         logger.info("--> snapshot");
         CreateSnapshotResponse createSnapshotResponse = client.admin()
             .cluster()
-            .prepareCreateSnapshot("test-repo", "test-snap")
+            .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap")
             .setWaitForCompletion(true)
             .setIndices("test-idx-*", "-test-idx-3")
             .get();
@@ -86,7 +86,14 @@
         );
         assertThat(
-            client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(),
+            client.admin()
+                .cluster()
+                .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo")
+                .setSnapshots("test-snap")
+                .get()
+                .getSnapshots()
+                .get(0)
+                .state(),
             equalTo(SnapshotState.SUCCESS)
         );
@@ -111,7 +118,7 @@ public void testSimpleWorkflow() {
         logger.info("--> restore all indices from the snapshot");
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
-            .prepareRestoreSnapshot("test-repo", "test-snap")
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap")
             .setWaitForCompletion(true)
             .get();
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
@@ -127,7 +134,7 @@ public void testSimpleWorkflow() {
         logger.info("--> restore one index after deletion");
         restoreSnapshotResponse = client.admin()
             .cluster()
-            .prepareRestoreSnapshot("test-repo", "test-snap")
+            .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap")
             .setWaitForCompletion(true)
             .setIndices("test-idx-*", "-test-idx-2")
             .get();
@@ -143,7 +150,10 @@ public void testSimpleWorkflow() {
     public void testMissingUri() {
         try {
-            clusterAdmin().preparePutRepository("test-repo").setType("hdfs").setSettings(Settings.EMPTY).get();
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
+                .setType("hdfs")
+                .setSettings(Settings.EMPTY)
+                .get();
             fail();
         } catch (RepositoryException e) {
             assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -153,7 +163,7 @@ public void testMissingUri() {
     public void testEmptyUri() {
         try {
-            clusterAdmin().preparePutRepository("test-repo")
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(Settings.builder().put("uri", "/path").build())
                 .get();
@@ -166,7 +176,7 @@ public void testEmptyUri() {
     public void testNonHdfsUri() {
         try {
-            clusterAdmin().preparePutRepository("test-repo")
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(Settings.builder().put("uri", "file:///").build())
                 .get();
@@ -179,7 +189,7 @@ public void testNonHdfsUri() {
     public void testPathSpecifiedInHdfs() {
         try {
-            clusterAdmin().preparePutRepository("test-repo")
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(Settings.builder().put("uri", "hdfs:///some/path").build())
                 .get();
@@ -192,7 +202,7 @@ public void testPathSpecifiedInHdfs() {
     public void testMissingPath() {
         try {
-            clusterAdmin().preparePutRepository("test-repo")
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(Settings.builder().put("uri", "hdfs:///").build())
                 .get();
@@ -207,7 +217,7 @@ public void testReplicationFactorBelowOne() {
         try {
             client().admin()
                 .cluster()
-                .preparePutRepository("test-repo")
+                .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(Settings.builder().put("uri", "hdfs:///").put("replication_factor", "0").put("path", "foo").build())
                 .get();
@@ -222,7 +232,7 @@ public void testReplicationFactorOverMaxShort() {
         try {
             client().admin()
                 .cluster()
-                .preparePutRepository("test-repo")
+                .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(Settings.builder().put("uri", "hdfs:///").put("replication_factor", "32768").put("path", "foo").build())
                 .get();
@@ -237,7 +247,7 @@ public void testReplicationFactorBelowReplicationMin() {
         try {
             client().admin()
                 .cluster()
-                .preparePutRepository("test-repo")
+                .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(
                     Settings.builder()
@@ -259,7 +269,7 @@ public void testReplicationFactorOverReplicationMax() {
         try {
             client().admin()
                 .cluster()
-                .preparePutRepository("test-repo")
+                .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
                 .setType("hdfs")
                 .setSettings(
                     Settings.builder()
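Note: the HDFS negative tests above still use the try { ... fail(); } catch (RepositoryException e) idiom; elsewhere this diff relies on expectThrows. An equivalent formulation of testMissingUri in that style (a sketch, assuming the same ESIntegTestCase helpers):

    RepositoryException e = expectThrows(
        RepositoryException.class,
        () -> clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
            .setType("hdfs")
            .setSettings(Settings.EMPTY)
            .get()
    );
    assertTrue(e.getCause() instanceof IllegalArgumentException);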
diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java
index 93dcd5a12d43d..9d330cd7e35eb 100644
--- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java
+++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java
@@ -262,7 +262,7 @@ private static void createRepository(String repoName, boolean readOnly, boolean
         Request repoReq = new Request("PUT", "/_snapshot/" + repoName);
         repoReq.setJsonEntity(
             Strings.toString(
-                new PutRepositoryRequest().type("fs")
+                new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).type("fs")
                     .verify(verify)
                     .settings(Settings.builder().put("location", repoName).put("readonly", readOnly).build())
             )
diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java
index b12a70ccb8425..4c6774988d7ae 100644
--- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java
+++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java
@@ -96,7 +96,7 @@ public void testSortOrder() throws Exception {
     private void doTestSortOrder(String repoName, Collection<String> allSnapshotNames, SortOrder order) throws IOException {
         final boolean includeIndexNames = randomBoolean();
-        final List<SnapshotInfo> defaultSorting = clusterAdmin().prepareGetSnapshots(repoName)
+        final List<SnapshotInfo> defaultSorting = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName)
            .setOrder(order)
            .setIncludeIndexNames(includeIndexNames)
            .get()
@@ -239,7 +239,7 @@ public void testFilterBySLMPolicy() throws Exception {
         final String repoName = "test-repo";
         AbstractSnapshotIntegTestCase.createRepository(logger, repoName, "fs");
         AbstractSnapshotIntegTestCase.createNSnapshots(logger, repoName, randomIntBetween(1, 5));
-        final List<SnapshotInfo> snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots("*")
+        final List<SnapshotInfo> snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "*")
             .setSnapshots("*")
             .setSort(SnapshotSortKey.NAME)
             .get()
@@ -248,7 +248,7 @@
         final String policyName = "some-policy";
         final SnapshotInfo withPolicy = AbstractSnapshotIntegTestCase.assertSuccessful(
             logger,
-            clusterAdmin().prepareCreateSnapshot(repoName, snapshotWithPolicy)
+            clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotWithPolicy)
                 .setUserMetadata(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, policyName))
                 .setWaitForCompletion(true)
                 .execute()
@@ -268,7 +268,7 @@
         final String otherPolicyName = "other-policy";
         final SnapshotInfo withOtherPolicy = AbstractSnapshotIntegTestCase.assertSuccessful(
             logger,
-            clusterAdmin().prepareCreateSnapshot(repoName, snapshotWithOtherPolicy)
+            clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotWithOtherPolicy)
                 .setUserMetadata(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, otherPolicyName))
                 .setWaitForCompletion(true)
                 .execute()
@@ -276,7 +276,7 @@
         assertThat(getAllSnapshotsForPolicies("*"), is(List.of(withOtherPolicy, withPolicy)));
         assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName), is(List.of(withOtherPolicy, withPolicy)));
         assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName, "no-such-policy*"), is(List.of(withOtherPolicy, withPolicy)));
-        final List<SnapshotInfo> allSnapshots = clusterAdmin().prepareGetSnapshots("*")
+        final List<SnapshotInfo> allSnapshots = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "*")
             .setSnapshots("*")
             .setSort(SnapshotSortKey.NAME)
             .get()
@@ -293,7 +293,7 @@ public void testSortAfterStartTime() throws Exception {
         final SnapshotInfo snapshot2 = createFullSnapshotWithUniqueStartTime(repoName, "snapshot-2", startTimes);
         final SnapshotInfo snapshot3 = createFullSnapshotWithUniqueStartTime(repoName, "snapshot-3", startTimes);
-        final List<SnapshotInfo> allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern())
+        final List<SnapshotInfo> allSnapshotInfo = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern())
             .setSnapshots(matchAllPattern())
             .setSort(SnapshotSortKey.START_TIME)
             .get()
@@ -310,7 +310,7 @@
         assertThat(allAfterStartTimeAscending(startTime3), is(List.of(snapshot3)));
         assertThat(allAfterStartTimeAscending(startTime3 + 1), empty());
-        final List<SnapshotInfo> allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern())
+        final List<SnapshotInfo> allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern())
             .setSnapshots(matchAllPattern())
             .setSort(SnapshotSortKey.START_TIME)
             .setOrder(SortOrder.DESC)
@@ -331,7 +331,7 @@ private SnapshotInfo createFullSnapshotWithUniqueStartTime(String repoName, Stri
         final SnapshotInfo snapshotInfo = AbstractSnapshotIntegTestCase.createFullSnapshot(logger, repoName, snapshotName);
         if (forbiddenStartTimes.contains(snapshotInfo.startTime())) {
             logger.info("--> snapshot start time collided");
-            assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get());
+            assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).get());
         } else {
             assertTrue(forbiddenStartTimes.add(snapshotInfo.startTime()));
             return snapshotInfo;
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/features.reset_features.json b/rest-api-spec/src/main/resources/rest-api-spec/api/features.reset_features.json
index 1a7f944e88079..dec102a681c81 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/features.reset_features.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/features.reset_features.json
@@ -18,6 +18,12 @@
         ]
       }
     ]
+  },
+  "params":{
+    "master_timeout":{
+      "type":"time",
+      "description":"Explicit operation timeout for connection to master node"
+    }
   }
   }
 }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
index 3d95712d30b30..9fc82eb125def 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
@@ -70,6 +70,53 @@ object with unmapped fields:
   - match: { hits.hits.1._source.a.very.deeply.nested.field: BBBB }
+---
+unmapped arrays:
+  - requires:
+      cluster_features: ["mapper.track_ignored_source"]
+      reason: requires tracking ignored source
+
+  - do:
+      indices.create:
+        index: test
+        body:
+          settings:
+            index:
+              mapping:
+                total_fields:
+                  ignore_dynamic_beyond_limit: true
+                  limit: 1
+
+          mappings:
+            _source:
+              mode: synthetic
+            properties:
+              name:
+                type: keyword
+
+  - do:
+      bulk:
+        index: test
+        refresh: true
+        body:
+          - '{ "create": { } }'
+          - '{ "name": "aaaa", "object_array": [ { "int_value": 10 }, { "int_value": 20 } ] }'
+          - '{ "create": { } }'
+          - '{ "name": "bbbb", "value_array": [ 100, 200, 300 ] }'
+
+  - do:
+      search:
+        index: test
+        sort: name
+
+  - match: { hits.total.value: 2 }
+  - match: { hits.hits.0._source.name: aaaa }
+  - match: { hits.hits.0._source.object_array.0.int_value: 10 }
+  - match: { hits.hits.0._source.object_array.1.int_value: 20 }
+  - match: { hits.hits.1._source.name: bbbb }
+  - match: { hits.hits.1._source.value_array: [ 100, 200, 300] }
+
---
nested object with unmapped fields:
   - requires:
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
index 3aee1fdf505fe..47ed06ed4a905 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
@@ -32,7 +32,7 @@ public void testPutRepositoryWithBlocks() {
         try {
             setClusterReadOnly(true);
             assertBlocked(
-                clusterAdmin().preparePutRepository("test-repo-blocks")
+                clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks")
                     .setType("fs")
                     .setVerify(false)
                     .setSettings(Settings.builder().put("location", randomRepoPath())),
@@ -44,7 +44,7 @@ public void testPutRepositoryWithBlocks() {
         logger.info("--> registering a repository is allowed when the cluster is not read only");
         assertAcked(
-            clusterAdmin().preparePutRepository("test-repo-blocks")
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks")
                 .setType("fs")
                 .setVerify(false)
                 .setSettings(Settings.builder().put("location", randomRepoPath()))
@@ -53,7 +53,7 @@ public void testVerifyRepositoryWithBlocks() {
         assertAcked(
-            clusterAdmin().preparePutRepository("test-repo-blocks")
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks")
                 .setType("fs")
                 .setVerify(false)
                 .setSettings(Settings.builder().put("location", randomRepoPath()))
@@ -62,7 +62,11 @@ public void testVerifyRepositoryWithBlocks() {
         // This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
         try {
             setClusterReadOnly(true);
-            VerifyRepositoryResponse response = clusterAdmin().prepareVerifyRepository("test-repo-blocks").get();
+            VerifyRepositoryResponse response = clusterAdmin().prepareVerifyRepository(
+                TEST_REQUEST_TIMEOUT,
+                TEST_REQUEST_TIMEOUT,
+                "test-repo-blocks"
+            ).get();
             assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes()));
         } finally {
             setClusterReadOnly(false);
@@ -71,7 +75,7 @@ public void testVerifyRepositoryWithBlocks() {
     public void testDeleteRepositoryWithBlocks() {
         assertAcked(
-            clusterAdmin().preparePutRepository("test-repo-blocks")
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks")
                 .setType("fs")
                 .setVerify(false)
                 .setSettings(Settings.builder().put("location", randomRepoPath()))
@@ -80,18 +84,21 @@ public void testDeleteRepositoryWithBlocks() {
         logger.info("--> deleting a repository is blocked when the cluster is read only");
         try {
             setClusterReadOnly(true);
-            assertBlocked(clusterAdmin().prepareDeleteRepository("test-repo-blocks"), Metadata.CLUSTER_READ_ONLY_BLOCK);
+            assertBlocked(
+                clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks"),
+                Metadata.CLUSTER_READ_ONLY_BLOCK
+            );
         } finally {
             setClusterReadOnly(false);
         }
         logger.info("--> deleting a repository is allowed when the cluster is not read only");
-        assertAcked(clusterAdmin().prepareDeleteRepository("test-repo-blocks"));
+        assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks"));
     }
     public void testGetRepositoryWithBlocks() {
         assertAcked(
-            clusterAdmin().preparePutRepository("test-repo-blocks")
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks")
                 .setType("fs")
                 .setVerify(false)
                 .setSettings(Settings.builder().put("location", randomRepoPath()))
@@ -100,7 +107,7 @@ public void testGetRepositoryWithBlocks() {
         // This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
         try {
             setClusterReadOnly(true);
-            GetRepositoriesResponse response = clusterAdmin().prepareGetRepositories("test-repo-blocks").get();
+            GetRepositoriesResponse response = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "test-repo-blocks").get();
             assertThat(response.repositories(), hasSize(1));
         } finally {
             setClusterReadOnly(false);
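Note: both blocks suites follow the same toggle-assert-toggle shape; because setClusterReadOnly(true) would poison later tests if an assertion throws, the reset belongs in finally. A condensed sketch of the pattern (names taken from the tests above):

    setClusterReadOnly(true);
    try {
        // Requests that mutate cluster metadata must be rejected while read-only.
        assertBlocked(
            clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks"),
            Metadata.CLUSTER_READ_ONLY_BLOCK
        );
    } finally {
        setClusterReadOnly(false); // always undo the block, even on assertion failure
    }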
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
index b6b0b2e54e691..eff71d0caf650 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
@@ -56,17 +56,21 @@ protected void setUpRepository() throws Exception {
         logger.info("--> register a repository");
         assertAcked(
-            clusterAdmin().preparePutRepository(REPOSITORY_NAME)
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPOSITORY_NAME)
                 .setType("fs")
                 .setSettings(Settings.builder().put("location", randomRepoPath()))
         );
         logger.info("--> verify the repository");
-        VerifyRepositoryResponse verifyResponse = clusterAdmin().prepareVerifyRepository(REPOSITORY_NAME).get();
+        VerifyRepositoryResponse verifyResponse = clusterAdmin().prepareVerifyRepository(
+            TEST_REQUEST_TIMEOUT,
+            TEST_REQUEST_TIMEOUT,
+            REPOSITORY_NAME
+        ).get();
         assertThat(verifyResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes()));
         logger.info("--> create a snapshot");
-        CreateSnapshotResponse snapshotResponse = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME)
+        CreateSnapshotResponse snapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, SNAPSHOT_NAME)
             .setIncludeGlobalState(true)
             .setWaitForCompletion(true)
             .get();
@@ -79,7 +83,10 @@ public void testCreateSnapshotWithBlocks() {
         try {
             setClusterReadOnly(true);
             assertThat(
-                clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setWaitForCompletion(true).get().status(),
+                clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, "snapshot-1")
+                    .setWaitForCompletion(true)
+                    .get()
+                    .status(),
                 equalTo(RestStatus.OK)
             );
         } finally {
@@ -87,7 +94,7 @@
         }
         logger.info("--> creating a snapshot is allowed when the cluster is not read only");
-        CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2")
+        CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, "snapshot-2")
             .setWaitForCompletion(true)
             .get();
         assertThat(response.status(), equalTo(RestStatus.OK));
@@ -98,7 +105,7 @@ public void testCreateSnapshotWithIndexBlocks() {
         try {
             enableIndexBlock(INDEX_NAME, SETTING_READ_ONLY);
             assertThat(
-                clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1")
+                clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, "snapshot-1")
                     .setIndices(COMMON_INDEX_NAME_MASK)
                     .setWaitForCompletion(true)
                     .get()
@@ -113,7 +120,7 @@
         try {
             enableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ);
             assertThat(
-                clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2")
+                clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, "snapshot-2")
                     .setIndices(COMMON_INDEX_NAME_MASK)
                     .setWaitForCompletion(true)
                    .get()
@@ -129,7 +136,7 @@ public void testDeleteSnapshotWithBlocks() {
         logger.info("--> deleting a snapshot is allowed when the cluster is read only");
         try {
             setClusterReadOnly(true);
-            assertTrue(clusterAdmin().prepareDeleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME).get().isAcknowledged());
+            assertTrue(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, SNAPSHOT_NAME).get().isAcknowledged());
         } finally {
             setClusterReadOnly(false);
         }
@@ -143,13 +150,16 @@ public void testRestoreSnapshotWithBlocks() {
         logger.info("--> restoring a snapshot is blocked when the cluster is read only");
         try {
             setClusterReadOnly(true);
-            assertBlocked(clusterAdmin().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), Metadata.CLUSTER_READ_ONLY_BLOCK);
+            assertBlocked(
+                clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, SNAPSHOT_NAME),
+                Metadata.CLUSTER_READ_ONLY_BLOCK
+            );
         } finally {
             setClusterReadOnly(false);
         }
         logger.info("--> creating a snapshot is allowed when the cluster is not read only");
-        RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME)
+        RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, SNAPSHOT_NAME)
             .setWaitForCompletion(true)
             .get();
         assertThat(response.status(), equalTo(RestStatus.OK));
@@ -161,7 +171,7 @@ public void testGetSnapshotWithBlocks() {
         // This test checks that the Get Snapshot operation is never blocked, even if the cluster is read only.
         try {
             setClusterReadOnly(true);
-            GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(REPOSITORY_NAME).get();
+            GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME).get();
             assertThat(response.getSnapshots(), hasSize(1));
             assertThat(response.getSnapshots().get(0).snapshotId().getName(), equalTo(SNAPSHOT_NAME));
         } finally {
@@ -173,7 +183,9 @@ public void testSnapshotStatusWithBlocks() {
         // This test checks that the Snapshot Status operation is never blocked, even if the cluster is read only.
         try {
             setClusterReadOnly(true);
-            SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(REPOSITORY_NAME).setSnapshots(SNAPSHOT_NAME).get();
+            SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME)
+                .setSnapshots(SNAPSHOT_NAME)
+                .get();
             assertThat(response.getSnapshots(), hasSize(1));
             assertThat(response.getSnapshots().get(0).getState().completed(), equalTo(true));
         } finally {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java
index c024d7ffb9772..aaf663c8c5b24 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java
@@ -43,7 +43,7 @@ public void testDesiredNodesAreNotIncludedInSnapshotsClusterState() {
         final var desiredNodesAfterSnapshot = getLatestDesiredNodes();
-        clusterAdmin().prepareRestoreSnapshot(repositoryName, snapshotName).setRestoreGlobalState(true).get();
+        clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName).setRestoreGlobalState(true).get();
         final var desiredNodesAfterRestore = getLatestDesiredNodes();
         assertThat(desiredNodesAfterRestore.historyID(), is(equalTo(desiredNodesAfterSnapshot.historyID())));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java
index bb9324dd7d10c..8b551e00caeeb 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java
@@ -327,12 +327,18 @@ public void testShardCreation() throws Exception {
         // restoring the index from a snapshot may change the number of indexing replicas because the routing table is created afresh
         var repoPath = randomRepoPath();
         assertAcked(
-            clusterAdmin().preparePutRepository("repo").setType("fs").setSettings(Settings.builder().put("location", repoPath))
+            clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo")
+                .setType("fs")
+                .setSettings(Settings.builder().put("location", repoPath))
         );
         assertEquals(
             SnapshotState.SUCCESS,
-            clusterAdmin().prepareCreateSnapshot("repo", "snap").setWaitForCompletion(true).get().getSnapshotInfo().state()
+            clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap")
+                .setWaitForCompletion(true)
+                .get()
+                .getSnapshotInfo()
+                .state()
         );
         if (randomBoolean()) {
@@ -348,7 +354,7 @@ public void testShardCreation() throws Exception {
         assertEquals(
             0,
-            clusterAdmin().prepareRestoreSnapshot("repo", "snap")
+            clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap")
                 .setIndices(INDEX_NAME)
                 .setIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, routingTableWatcher.numReplicas))
                 .setWaitForCompletion(true)
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
index 16be816b69bc4..a1a29468cc5bd 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
+++
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -106,7 +106,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti ensureStableCluster(3); assertAcked( - clusterAdmin().preparePutRepository("repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo") .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); @@ -130,7 +130,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti createIndex(indexName, indexSettings(6, 0).put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms").build()); var shardSizes = createReasonableSizedShards(indexName); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("repo", "snap") + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setWaitForCompletion(true) .get(); final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); @@ -145,7 +145,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES - 1L); refreshDiskUsage(); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repo", "snap") + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setWaitForCompletion(true) .get(); final RestoreInfo restoreInfo = restoreSnapshotResponse.getRestoreInfo(); @@ -179,7 +179,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard ensureStableCluster(3); assertAcked( - clusterAdmin().preparePutRepository("repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo") .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); @@ -203,7 +203,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard createIndex(indexName, indexSettings(6, 0).put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms").build()); var shardSizes = createReasonableSizedShards(indexName); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("repo", "snap") + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setWaitForCompletion(true) .get(); final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); @@ -219,7 +219,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard getTestFileStore(dataNodeName).setTotalSpace(usableSpace + WATERMARK_BYTES); refreshDiskUsage(); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repo", "snap") + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setWaitForCompletion(true) .get(); final RestoreInfo restoreInfo = restoreSnapshotResponse.getRestoreInfo(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java index 8afcaccaf9e77..cab7b7df00fe9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java @@ -70,11 +70,14 @@ public void testIsGreenDuringSnapshotRestore() { var repositoryName = "repository"; var snapshotName = randomIdentifier(); assertAcked( - clusterAdmin().preparePutRepository(repositoryName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath())) ); - clusterAdmin().prepareCreateSnapshot(repositoryName, snapshotName).setIndices(index).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) + .setIndices(index) + .setWaitForCompletion(true) + .get(); if (randomBoolean()) { assertAcked(indicesAdmin().prepareDelete(index)); } else { @@ -83,7 +86,10 @@ public void testIsGreenDuringSnapshotRestore() { ensureGreen(); assertHealthDuring(equalTo(GREEN), () -> { - clusterAdmin().prepareRestoreSnapshot(repositoryName, snapshotName).setIndices(index).setWaitForCompletion(true).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) + .setIndices(index) + .setWaitForCompletion(true) + .get(); ensureGreen(index); }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 2bc6856479ab7..3202f5513e9ac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -253,7 +253,13 @@ public void testRestoreSnapshotOverLimit() { repoSettings.put("compress", randomBoolean()); repoSettings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); - assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(repoSettings.build())); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") + .setType("fs") + .setSettings(repoSettings.build()) + ); int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); @@ -270,7 +276,7 @@ public void testRestoreSnapshotOverLimit() { logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("snapshot-index") .get(); @@ -282,7 +288,7 @@ public void testRestoreSnapshotOverLimit() { List snapshotInfos = client.admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots(); @@ -310,7 +316,7 @@ public void testRestoreSnapshotOverLimit() { try { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", 
"test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("snapshot-index") .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index 526921fdc95ba..6a8806ca26526 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -104,7 +104,7 @@ public void clusterChanged(ClusterChangedEvent event) { logger.info("--> starting snapshot"); ActionFuture future = client(masterNode1).admin() .cluster() - .prepareCreateSnapshot("test-repo", snapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", snapshot) .setWaitForCompletion(true) .setIndices(idxName) .execute(); @@ -163,7 +163,7 @@ public void testDisruptionAfterShardFinalization() throws Exception { logger.info("--> starting snapshot"); ActionFuture future = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, snapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .execute(); @@ -193,7 +193,7 @@ public void testDisruptionAfterShardFinalization() throws Exception { blockMasterFromFinalizingSnapshotOnIndexFile(repoName); final ActionFuture snapshotFuture = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2") .setWaitForCompletion(true) .execute(); waitForBlock(masterNode, repoName); @@ -203,14 +203,14 @@ public void testDisruptionAfterShardFinalization() throws Exception { logger.info("--> create a snapshot expected to be successful"); final CreateSnapshotResponse successfulSnapshot = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2") .setWaitForCompletion(true) .get(); final SnapshotInfo successfulSnapshotInfo = successfulSnapshot.getSnapshotInfo(); assertThat(successfulSnapshotInfo.state(), is(SnapshotState.SUCCESS)); logger.info("--> making sure snapshot delete works out cleanly"); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, "snapshot-2").get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2").get()); } public void testMasterFailOverDuringShardSnapshots() throws Exception { @@ -230,7 +230,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { final ActionFuture snapshotResponse = internalCluster().masterClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap") .setWaitForCompletion(true) .execute(); @@ -256,7 +256,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { private void assertSnapshotExists(String repository, String snapshot) { GetSnapshotsResponse snapshotsStatusResponse = dataNodeClient().admin() .cluster() - .prepareGetSnapshots(repository) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repository) .setSnapshots(snapshot) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 
2fed0a45032a9..4bd8fadc93095 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -554,7 +554,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws InterruptedException, // it snapshots and that will write a new segments.X+1 file logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("fs") .setSettings( Settings.builder() @@ -564,10 +564,11 @@ public void testCorruptFileThenSnapshotAndRestore() throws InterruptedException, ) ); logger.info("--> snapshot"); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .setIndices("test") - .get(); + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).setIndices("test").get(); final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state(); logger.info("--> snapshot terminated with state " + snapshotState); final List files = listShardFiles(shardRouting); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index d1462ef8da3dc..267af6cc1fb01 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -247,12 +247,13 @@ public void testSpecifiedIndexUnavailableSnapshotRestore() throws Exception { ensureGreen("test1"); waitForRelocation(); - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("dummy-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "dummy-repo" + ).setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - clusterAdmin().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", "snap1").setWaitForCompletion(true).get(); verify(snapshot("snap2", "test1", "test2"), true); verify(restore("snap1", "test1", "test2"), true); @@ -364,12 +365,13 @@ public void testWildcardBehaviourSnapshotRestore() throws Exception { ensureGreen("foobar"); waitForRelocation(); - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("dummy-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "dummy-repo" + ).setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - clusterAdmin().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, 
"dummy-repo", "snap1").setWaitForCompletion(true).get(); IndicesOptions options = IndicesOptions.fromOptions(false, false, true, false); verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), true); @@ -656,11 +658,13 @@ static GetSettingsRequestBuilder getSettings(String... indices) { } private static CreateSnapshotRequestBuilder snapshot(String name, String... indices) { - return clusterAdmin().prepareCreateSnapshot("dummy-repo", name).setWaitForCompletion(true).setIndices(indices); + return clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", name) + .setWaitForCompletion(true) + .setIndices(indices); } private static RestoreSnapshotRequestBuilder restore(String name, String... indices) { - return clusterAdmin().prepareRestoreSnapshot("dummy-repo", name) + return clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", name) .setRenamePattern("(.+)") .setRenameReplacement("$1-copy-" + name) .setWaitForCompletion(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4f15b82ca1f16..676f8185ecb84 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -861,7 +861,7 @@ public void testSnapshotRecovery() throws Exception { indicesAdmin().prepareClose(INDEX_NAME).get(); logger.info("--> restore"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, SNAP_NAME) + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, SNAP_NAME) .setWaitForCompletion(true) .get(); int totalShards = restoreSnapshotResponse.getRestoreInfo().totalShards(); @@ -1976,7 +1976,7 @@ private void assertGlobalCheckpointIsStableAndSyncedInAllNodes(String indexName, private void createRepository(boolean enableSnapshotPeerRecoveries) { assertAcked( - clusterAdmin().preparePutRepository(REPO_NAME) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPO_NAME) .setType("fs") .setSettings( Settings.builder() @@ -1988,7 +1988,7 @@ private void createRepository(boolean enableSnapshotPeerRecoveries) { } private CreateSnapshotResponse createSnapshot(String indexName) { - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, SNAP_NAME) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, SNAP_NAME) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -1999,7 +1999,7 @@ private CreateSnapshotResponse createSnapshot(String indexName) { ); assertThat( - clusterAdmin().prepareGetSnapshots(REPO_NAME).setSnapshots(SNAP_NAME).get().getSnapshots().get(0).state(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME).setSnapshots(SNAP_NAME).get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS) ); return createSnapshotResponse; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index 212cf7510d349..2f336f25c3cab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -243,7 +243,7 @@ public void testFailingReposAreTreatedAsNonExistingShardSnapshots() throws Excep ); assertAcked( - clusterAdmin().preparePutRepository(failingRepo.v1()) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, failingRepo.v1()) .setType(FailingRepoPlugin.TYPE) .setVerify(false) .setSettings(Settings.builder().put(repoFailureType, true).put("location", failingRepo.v2())) @@ -290,7 +290,7 @@ private ShardId getShardIdForIndex(String indexName) { private void createRepository(String repositoryName, String type, Path location, boolean recoveryEnabledRepo) { assertAcked( - clusterAdmin().preparePutRepository(repositoryName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType(type) .setVerify(false) .setSettings( @@ -302,6 +302,9 @@ private void createRepository(String repositoryName, String type, Path location, } private void createSnapshot(String repoName, String snapshotName, String index) { - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(index).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .setIndices(index) + .get(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index ea2c221c8c4a4..6c7bcd17af1f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -145,7 +145,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce if (useBwCFormat) { // Reload the RepositoryData so we don't use cached data that wasn't serialized - assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get()); createRepository(repoName, "fs", repoPath); } @@ -176,10 +176,11 @@ public void testGetShardSnapshotWhileThereIsARunningSnapshot() throws Exception blockAllDataNodes(fsRepoName); final String snapshotName = "snap-1"; - final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot(fsRepoName, snapshotName) - .setIndices(indexName) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + fsRepoName, + snapshotName + ).setIndices(indexName).setWaitForCompletion(true).execute(); waitForBlockOnAnyDataNode(fsRepoName); @@ -292,7 +293,7 @@ public void testFailedSnapshotsAreNotReturned() throws Exception { ((MockRepository) repositoriesService.repository(repoName)).setBlockAndFailOnWriteSnapFiles(); } - clusterAdmin().prepareCreateSnapshot(repoName, "snap") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap") .setIndices(indexName) .setWaitForCompletion(false) .setFeatureStates(NO_FEATURE_STATES_VALUE) @@ -341,9 +342,9 @@ private PlainActionFuture getLatestSnapshotForShardFut PlainActionFuture future = new PlainActionFuture<>(); final GetShardSnapshotRequest request; if (useAllRepositoriesRequest && randomBoolean()) { - request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); + 
request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(TEST_REQUEST_TIMEOUT, shardId); } else { - request = GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, repositories); + request = GetShardSnapshotRequest.latestSnapshotInRepositories(TEST_REQUEST_TIMEOUT, shardId, repositories); } client().execute(TransportGetShardSnapshotAction.TYPE, request, future); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java index f931eb717457d..d2f567567957c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java @@ -106,7 +106,7 @@ public void testCreateInvalidRepository() throws Exception { // verification should fail with some node has InvalidRepository final var expectedException = expectThrows( RepositoryVerificationException.class, - clusterAdmin().prepareVerifyRepository(repositoryName) + clusterAdmin().prepareVerifyRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) ); for (Throwable suppressed : expectedException.getSuppressed()) { Throwable outerCause = suppressed.getCause(); @@ -130,16 +130,27 @@ public void testCreateInvalidRepository() throws Exception { // put repository again: let all node can create repository successfully createRepository(repositoryName, UnstableRepository.TYPE, Settings.builder().put("location", randomRepoPath())); // verification should succeed with all node create repository successfully - VerifyRepositoryResponse verifyRepositoryResponse = clusterAdmin().prepareVerifyRepository(repositoryName).get(); + VerifyRepositoryResponse verifyRepositoryResponse = clusterAdmin().prepareVerifyRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + repositoryName + ).get(); assertEquals(verifyRepositoryResponse.getNodes().size(), internalCluster().numDataAndMasterNodes()); } private void createRepository(String name, String type, Settings.Builder settings) { // create - assertAcked(clusterAdmin().preparePutRepository(name).setType(type).setVerify(false).setSettings(settings).get()); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name) + .setType(type) + .setVerify(false) + .setSettings(settings) + .get() + ); // get - final GetRepositoriesResponse updatedGetRepositoriesResponse = clusterAdmin().prepareGetRepositories(name).get(); + final GetRepositoriesResponse updatedGetRepositoriesResponse = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, name) + .get(); // assert assertThat(updatedGetRepositoriesResponse.repositories(), hasSize(1)); final RepositoryMetadata updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java index 76f3ca328d222..1b536aa9be982 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java @@ -46,11 +46,17 @@ public void testUpdateRepository() { final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); - 
assertAcked(client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings)); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) + .setType(FsRepository.TYPE) + .setSettings(repoSettings) + ); final GetRepositoriesResponse originalGetRepositoriesResponse = client.admin() .cluster() - .prepareGetRepositories(repositoryName) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repositoryName) .get(); assertThat(originalGetRepositoriesResponse.repositories(), hasSize(1)); @@ -64,11 +70,17 @@ public void testUpdateRepository() { final boolean updated = randomBoolean(); final String updatedRepositoryType = updated ? "mock" : FsRepository.TYPE; - assertAcked(client.admin().cluster().preparePutRepository(repositoryName).setType(updatedRepositoryType).setSettings(repoSettings)); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) + .setType(updatedRepositoryType) + .setSettings(repoSettings) + ); final GetRepositoriesResponse updatedGetRepositoriesResponse = client.admin() .cluster() - .prepareGetRepositories(repositoryName) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repositoryName) .get(); assertThat(updatedGetRepositoriesResponse.repositories(), hasSize(1)); @@ -82,6 +94,12 @@ public void testUpdateRepository() { // check that a noop update does not verify. Since the new data node does not share the same `path.repo`, verification will fail if // it runs. internalCluster().startDataOnlyNode(Settings.builder().put(Environment.PATH_REPO_SETTING.getKey(), createTempDir()).build()); - assertAcked(client.admin().cluster().preparePutRepository(repositoryName).setType(updatedRepositoryType).setSettings(repoSettings)); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) + .setType(updatedRepositoryType) + .setSettings(repoSettings) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index bf937a9d57f02..01b01fdf5fcde 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -59,7 +59,10 @@ public void testRepeatCleanupsDontRemove() throws Exception { final ActionFuture cleanupFuture = startBlockedCleanup("test-repo"); logger.info("--> sending another cleanup"); - assertFutureThrows(clusterAdmin().prepareCleanupRepository("test-repo").execute(), IllegalStateException.class); + assertFutureThrows( + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").execute(), + IllegalStateException.class + ); logger.info("--> ensure cleanup is still in progress"); final RepositoryCleanupInProgress cleanup = clusterAdmin().prepareState().get().getState().custom(RepositoryCleanupInProgress.TYPE); @@ -85,7 +88,7 @@ private ActionFuture startBlockedCleanup(String repoN createRepository(repoName, "mock"); logger.info("--> snapshot"); - clusterAdmin().prepareCreateSnapshot(repoName, "test-snap").setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, 
"test-snap").setWaitForCompletion(true).get(); final BlobStoreRepository repository = getRepositoryOnMaster(repoName); @@ -111,7 +114,7 @@ private ActionFuture startBlockedCleanup(String repoN final ActionFuture future = internalCluster().nonMasterClient() .admin() .cluster() - .prepareCleanupRepository(repoName) + .prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .execute(); final String masterNode = internalCluster().getMasterName(); @@ -128,9 +131,11 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti logger.info("--> create three snapshots"); for (int i = 0; i < 3; ++i) { - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, "test-snap-" + i) - .setWaitForCompletion(true) - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + "test-snap-" + i + ).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS)); } @@ -158,7 +163,7 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti } logger.info("--> cleanup repository"); - clusterAdmin().prepareCleanupRepository(repoName).get(); + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get(); BlobStoreTestUtil.assertConsistency(repository); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java index 6c8f4c04e2a75..a9f8e0563c1ff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java @@ -65,15 +65,15 @@ public void testSnapshotOperationPurposes() throws Exception { } final var timeout = TimeValue.timeValueSeconds(10); - clusterAdmin().prepareCleanupRepository(repoName).get(timeout); - clusterAdmin().prepareCloneSnapshot(repoName, "snap-0", "clone-0").setIndices("index-0").get(timeout); + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get(timeout); + clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap-0", "clone-0").setIndices("index-0").get(timeout); // restart to ensure that the reads which happen when starting a node on a nonempty repository use the expected purposes internalCluster().fullRestart(); - clusterAdmin().prepareGetSnapshots(repoName).get(timeout); + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get(timeout); - clusterAdmin().prepareRestoreSnapshot(repoName, "clone-0") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, "clone-0") .setRenamePattern("index-0") .setRenameReplacement("restored-0") .setWaitForCompletion(true) @@ -83,7 +83,7 @@ public void testSnapshotOperationPurposes() throws Exception { assertTrue(startDeleteSnapshot(repoName, "snap-" + i).get(10, TimeUnit.SECONDS).isAcknowledged()); } - clusterAdmin().prepareDeleteRepository(repoName).get(timeout); + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get(timeout); } public static class TestPlugin extends Plugin implements RepositoryPlugin { diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index fa5d8d93c9e45..1ca2526b53dff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -137,7 +137,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo final var reposResponse = client().execute( GetRepositoriesAction.INSTANCE, - new GetRepositoriesRequest(new String[] { "repo", "repo1" }) + new GetRepositoriesRequest(TEST_REQUEST_TIMEOUT, new String[] { "repo", "repo1" }) ).get(); assertThat( @@ -204,7 +204,10 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic "[err-repo] missing", expectThrows( RepositoryMissingException.class, - client().execute(GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(new String[] { "err-repo" })) + client().execute( + GetRepositoriesAction.INSTANCE, + new GetRepositoriesRequest(TEST_REQUEST_TIMEOUT, new String[] { "err-repo" }) + ) ).getMessage() ); @@ -239,7 +242,7 @@ private PutRepositoryRequest sampleRestRequest(String name) throws Exception { var bis = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis) ) { - return new PutRepositoryRequest(name).source(parser.map()); + return new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name).source(parser.map()); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java index dfd753d02db67..049260e14100f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java @@ -173,7 +173,10 @@ public void testRestoreWithRemovedFileSettings() throws Exception { Files.delete(fs.watchedFile()); logger.info("--> restore global state from the snapshot"); - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) + .get(); ensureGreen(); @@ -285,7 +288,10 @@ public void testRestoreWithPersistedFileSettings() throws Exception { logger.info("--> restore global state from the snapshot"); var removedReservedState = removedReservedClusterStateListener(masterNode); - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) + .get(); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java index 2a8db5f317cfe..d3dba66055e01 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java @@ -53,10 +53,11 @@ public void testAbortedRestoreAlsoAbortFileRestores() throws Exception { failReadsAllDataNodes(repositoryName); logger.info("--> starting restore"); - final ActionFuture future = clusterAdmin().prepareRestoreSnapshot(repositoryName, snapshotName) - .setWaitForCompletion(true) - .setIndices(indexName) - .execute(); + final ActionFuture future = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repositoryName, + snapshotName + ).setWaitForCompletion(true).setIndices(indexName).execute(); assertBusy(() -> { final RecoveryResponse recoveries = indicesAdmin().prepareRecoveries(indexName) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java index bd14f913b10ef..86a4d728df787 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java @@ -60,7 +60,10 @@ public void run() { snapshotExecutor.execute(new BlockingTask()); safeAwait(barrier); // wait for snapshot thread to be blocked - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-1").setWaitForCompletion(false).setPartial(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-1") + .setWaitForCompletion(false) + .setPartial(true) + .get(); // resulting cluster state has been applied on all nodes, which means the first task for the SNAPSHOT pool is queued up final var snapshot = SnapshotsInProgress.get(clusterService.state()).forRepo(repoName).get(0).snapshot(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java index df4d52727384f..ed0e226fc377b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java @@ -112,7 +112,7 @@ public void testIncrementalBehaviorOnPrimaryFailover() throws InterruptedExcepti final String snapshot3 = "snap-3"; logger.info("--> creating snapshot 3"); - clusterAdmin().prepareCreateSnapshot(repo, snapshot3).setIndices(indexName).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot3).setIndices(indexName).setWaitForCompletion(true).get(); logger.info("--> Shutting down new primary node [{}]", newPrimary); stopNode(newPrimary); @@ -120,7 +120,7 @@ public void testIncrementalBehaviorOnPrimaryFailover() throws InterruptedExcepti final String snapshot4 = "snap-4"; logger.info("--> creating snapshot 4"); - clusterAdmin().prepareCreateSnapshot(repo, snapshot4).setIndices(indexName).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot4).setIndices(indexName).setWaitForCompletion(true).get(); assertTwoIdenticalShardSnapshots(repo, indexName, snapshot3, snapshot4); @@ -156,7 +156,7 @@ public void testForceMergeCausesFullSnapshot() throws Exception { createRepository(repo, "fs"); logger.info("--> creating snapshot 1"); - clusterAdmin().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, 
snapshot1).setIndices(indexName).setWaitForCompletion(true).get(); logger.info("--> force merging down to a single segment"); final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).setFlush(true).get(); @@ -164,7 +164,7 @@ public void testForceMergeCausesFullSnapshot() throws Exception { final String snapshot2 = "snap-2"; logger.info("--> creating snapshot 2"); - clusterAdmin().prepareCreateSnapshot(repo, snapshot2).setIndices(indexName).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot2).setIndices(indexName).setWaitForCompletion(true).get(); logger.info("--> asserting that the two snapshots refer to different files in the repository"); final SnapshotStats secondSnapshotShardStatus = getStats(repo, snapshot2).getIndices().get(indexName).getShards().get(0).getStats(); @@ -220,7 +220,7 @@ public void testRecordCorrectSegmentCountsWithBackgroundMerges() throws Exceptio }, 30L, TimeUnit.SECONDS); final SnapshotInfo after = createFullSnapshot(repoName, "snapshot-after"); - final int incrementalFileCount = clusterAdmin().prepareSnapshotStatus() + final int incrementalFileCount = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) .setRepository(repoName) .setSnapshots(after.snapshotId().getName()) .get() @@ -252,12 +252,12 @@ private void assertTwoIdenticalShardSnapshots(String repo, String indexName, Str } private SnapshotStatus getStats(String repository, String snapshot) { - return clusterAdmin().prepareSnapshotStatus(repository).setSnapshots(snapshot).get().getSnapshots().get(0); + return clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repository).setSnapshots(snapshot).get().getSnapshots().get(0); } private void ensureRestoreSingleShardSuccessfully(String repo, String indexName, String snapshot, String indexSuffix) { logger.info("--> restoring [{}]", snapshot); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repo, snapshot) + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) .setIndices(indexName) .setRenamePattern("(.+)") .setRenameReplacement("$1" + indexSuffix) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java index ca06dcea88766..a16a19f66085b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java @@ -133,7 +133,7 @@ public void testCloneSnapshotIndex() throws Exception { final String targetSnapshot = "target-snapshot"; assertAcked(startClone(repoName, sourceSnapshot, targetSnapshot, indexName).get()); - final List status = clusterAdmin().prepareSnapshotStatus(repoName) + final List status = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) .setSnapshots(sourceSnapshot, targetSnapshot) .get() .getSnapshots(); @@ -171,7 +171,7 @@ public void testClonePreventsSnapshotDelete() throws Exception { unblockNode(repoName, masterName); assertAcked(cloneFuture.get()); - final List status = clusterAdmin().prepareSnapshotStatus(repoName) + final List status = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) .setSnapshots(sourceSnapshot, targetSnapshot) .get() .getSnapshots(); @@ -226,7 +226,10 @@ public void 
testLongRunningCloneAllowsConcurrentSnapshot() throws Exception { createIndexWithRandomDocs(indexFast, randomIntBetween(20, 100)); assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot").setIndices(indexFast).setWaitForCompletion(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "fast-snapshot") + .setIndices(indexFast) + .setWaitForCompletion(true) + .execute() ); assertThat(cloneFuture.isDone(), is(false)); @@ -250,10 +253,11 @@ public void testLongRunningSnapshotAllowsConcurrentClone() throws Exception { createIndexWithRandomDocs(indexFast, randomIntBetween(20, 100)); blockDataNode(repoName, dataNode); - final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot") - .setIndices(indexFast) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + "fast-snapshot" + ).setIndices(indexFast).setWaitForCompletion(true).execute(); waitForBlock(dataNode, repoName); final String targetSnapshot = "target-snapshot"; @@ -467,7 +471,7 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { final Client masterClient = internalCluster().masterClient(); final ActionFuture sourceSnapshotFuture = masterClient.admin() .cluster() - .prepareCreateSnapshot(repoName, sourceSnapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, sourceSnapshot) .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); @@ -500,7 +504,7 @@ public void testSnapshotQueuedAfterCloneFromBrokenSourceSnapshot() throws Except final Client masterClient = internalCluster().masterClient(); final ActionFuture sourceSnapshotFuture = masterClient.admin() .cluster() - .prepareCreateSnapshot(repoName, sourceSnapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, sourceSnapshot) .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); @@ -802,13 +806,13 @@ public void testCloneAfterFailedShardSnapshot() throws Exception { blockDataNode(repoName, dataNode); final ActionFuture snapshotFuture = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, "full-snapshot") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "full-snapshot") .execute(); awaitNumberOfSnapshotsInProgress(1); waitForBlock(dataNode, repoName); final ActionFuture cloneFuture = client(masterNode).admin() .cluster() - .prepareCloneSnapshot(repoName, sourceSnapshot, "target-snapshot") + .prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, sourceSnapshot, "target-snapshot") .setIndices(testIndex) .execute(); awaitNumberOfSnapshotsInProgress(2); @@ -842,7 +846,11 @@ private static ActionFuture startClone( String targetSnapshot, String... 
indices ) { - return client.admin().cluster().prepareCloneSnapshot(repoName, sourceSnapshot, targetSnapshot).setIndices(indices).execute(); + return client.admin() + .cluster() + .prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, sourceSnapshot, targetSnapshot) + .setIndices(indices) + .execute(); } private void blockMasterOnReadIndexMeta(String repoName) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 1152cf5f03e5a..e6b2b86d0dbeb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -115,7 +115,10 @@ public void testLongRunningSnapshotAllowsConcurrentSnapshot() throws Exception { createIndexWithContent(indexFast, dataNode2, dataNode); assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot").setIndices(indexFast).setWaitForCompletion(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "fast-snapshot") + .setIndices(indexFast) + .setWaitForCompletion(true) + .execute() ); assertThat(createSlowFuture.isDone(), is(false)); @@ -140,7 +143,10 @@ public void testRecreateCorruptedRepositoryDuringSnapshotsFails() throws Excepti final String indexFast = "index-fast"; createIndexWithContent(indexFast, fastDataNode, slowDataNode); assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot").setIndices(indexFast).setWaitForCompletion(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "fast-snapshot") + .setIndices(indexFast) + .setWaitForCompletion(true) + .execute() ); logger.info("--> corrupting the repository by moving index-N blob to next generation"); @@ -153,7 +159,9 @@ public void testRecreateCorruptedRepositoryDuringSnapshotsFails() throws Excepti logger.info("--> trying to create another snapshot in order for repository to be marked as corrupt"); final SnapshotException snapshotException = expectThrows( SnapshotException.class, - clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot2").setIndices(indexFast).setWaitForCompletion(true) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "fast-snapshot2") + .setIndices(indexFast) + .setWaitForCompletion(true) ); assertThat(snapshotException.getMessage(), containsString("failed to update snapshot in repository")); assertEquals(RepositoryData.CORRUPTED_REPO_GEN, getRepositoryMetadata(repoName).generation()); @@ -206,7 +214,7 @@ public void testDeletesAreBatched() throws Exception { } snapshotNames.removeAll(toDelete); final ListenableFuture future = new ListenableFuture<>(); - clusterAdmin().prepareDeleteSnapshot(repoName, toDelete.toArray(Strings.EMPTY_ARRAY)).execute(future); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, toDelete.toArray(Strings.EMPTY_ARRAY)).execute(future); deleteFutures.add(future); } @@ -246,7 +254,10 @@ public void testBlockedRepoDoesNotBlockOtherRepos() throws Exception { final ActionFuture createSlowFuture = startAndBlockFailingFullSnapshot(blockedRepoName, "blocked-snapshot"); - clusterAdmin().prepareCreateSnapshot(otherRepoName, "snapshot").setIndices("does-not-exist-*").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, otherRepoName, "snapshot") + .setIndices("does-not-exist-*") + 
.setWaitForCompletion(false) + .get(); unblockNode(blockedRepoName, internalCluster().getMasterName()); expectThrows(SnapshotException.class, createSlowFuture); @@ -382,10 +393,11 @@ public void testAbortOneOfMultipleSnapshots() throws Exception { awaitNDeletionsInProgress(1); logger.info("--> start third snapshot"); - final ActionFuture thirdSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-three") - .setIndices(secondIndex) - .setWaitForCompletion(true) - .execute(); + final ActionFuture thirdSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + "snapshot-three" + ).setIndices(secondIndex).setWaitForCompletion(true).execute(); assertThat(firstSnapshotResponse.isDone(), is(false)); assertThat(secondSnapshotResponse.isDone(), is(false)); @@ -402,7 +414,7 @@ public void testAbortOneOfMultipleSnapshots() throws Exception { logger.info("--> verify that the first snapshot is gone"); assertThat( - clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), containsInAnyOrder(secondSnapshotInfo, thirdSnapshotInfo) ); } @@ -468,7 +480,7 @@ public void testCascadedAborts() throws Exception { assertAcked(allDeletedResponse.get()); logger.info("--> verify that all snapshots are gone"); - assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); } public void testMasterFailOverWithQueuedDeletes() throws Exception { @@ -552,7 +564,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { logger.info("--> verify that all snapshots are gone and no more work is left in the cluster state"); awaitNoMoreRunningOperations(); - assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); } public void testAssertMultipleSnapshotsAndPrimaryFailOver() throws Exception { @@ -611,7 +623,7 @@ public void testQueuedDeletesWithFailures() throws Exception { final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); - assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); } public void testQueuedDeletesWithOverlap() throws Exception { @@ -638,7 +650,7 @@ public void testQueuedDeletesWithOverlap() throws Exception { final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); - assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); } public void testQueuedOperationsOnMasterRestart() throws Exception { @@ -651,7 +663,7 @@ public void testQueuedOperationsOnMasterRestart() throws Exception { startAndBlockOnDeleteSnapshot(repoName, "*"); - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-three").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, 
"snapshot-three").setWaitForCompletion(false).get(); startDeleteSnapshot(repoName, "*"); awaitNDeletionsInProgress(2); @@ -677,20 +689,20 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { blockNodeOnAnyFiles(repoName, masterNode); ActionFuture firstDeleteFuture = client(masterNode).admin() .cluster() - .prepareDeleteSnapshot(repoName, "*") + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*") .execute(); waitForBlock(masterNode, repoName); final ActionFuture createThirdSnapshot = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-three") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-three") .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); final ActionFuture secondDeleteFuture = client(masterNode).admin() .cluster() - .prepareDeleteSnapshot(repoName, "*") + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*") .execute(); awaitNDeletionsInProgress(2); @@ -733,7 +745,7 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except final ActionFuture deleteFuture = client(masterNode).admin() .cluster() - .prepareDeleteSnapshot(repoName, "*") + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*") .execute(); awaitNDeletionsInProgress(1); @@ -767,7 +779,7 @@ public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception corruptIndexN(repoPath, generation); - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-three").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-three").setWaitForCompletion(false).get(); final ActionFuture deleteFuture = startDeleteFromNonMasterClient(repoName, "*"); awaitNDeletionsInProgress(2); @@ -975,14 +987,16 @@ public void testQueuedSnapshotsWaitingForShardReady() throws Exception { logger.info("--> start two snapshots"); final String snapshotOne = "snap-1"; final String snapshotTwo = "snap-2"; - final ActionFuture snapOneResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotOne) - .setWaitForCompletion(false) - .setIndices(testIndex) - .execute(); - final ActionFuture snapTwoResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotTwo) - .setWaitForCompletion(false) - .setIndices(testIndex) - .execute(); + final ActionFuture snapOneResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotOne + ).setWaitForCompletion(false).setIndices(testIndex).execute(); + final ActionFuture snapTwoResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotTwo + ).setWaitForCompletion(false).setIndices(testIndex).execute(); snapOneResponse.get(); snapTwoResponse.get(); @@ -1071,7 +1085,7 @@ public void testEquivalentDeletesAreDeduplicated() throws Exception { final int deletes = randomIntBetween(2, 10); final List> deleteResponses = new ArrayList<>(deletes); for (int i = 0; i < deletes; ++i) { - deleteResponses.add(clusterAdmin().prepareDeleteSnapshot(repoName, "*").execute()); + deleteResponses.add(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*").execute()); } waitForBlock(masterName, repoName); awaitNDeletionsInProgress(1); @@ -1273,10 +1287,14 @@ public void testMasterFailoverAndMultipleQueuedUpSnapshotsAcrossTwoRepos() throw blockMasterFromFinalizingSnapshotOnIndexFile(repoName); blockMasterFromFinalizingSnapshotOnIndexFile(otherRepoName); - clusterAdmin().prepareCreateSnapshot(repoName, 
"snapshot-blocked-1").setWaitForCompletion(false).get(); - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-blocked-2").setWaitForCompletion(false).get(); - clusterAdmin().prepareCreateSnapshot(otherRepoName, "snapshot-other-blocked-1").setWaitForCompletion(false).get(); - clusterAdmin().prepareCreateSnapshot(otherRepoName, "snapshot-other-blocked-2").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-blocked-1").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-blocked-2").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, otherRepoName, "snapshot-other-blocked-1") + .setWaitForCompletion(false) + .get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, otherRepoName, "snapshot-other-blocked-2") + .setWaitForCompletion(false) + .get(); awaitNumberOfSnapshotsInProgress(4); final String initialMaster = internalCluster().getMasterName(); @@ -1323,7 +1341,7 @@ public void testConcurrentOperationsLimit() throws Exception { final ConcurrentSnapshotExecutionException cse = expectThrows( ConcurrentSnapshotExecutionException.class, - clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "expected-to-fail") ); assertThat( cse.getMessage(), @@ -1337,7 +1355,7 @@ public void testConcurrentOperationsLimit() throws Exception { ); boolean deleteAndAbortAll = false; if (deleteFuture == null && randomBoolean()) { - deleteFuture = clusterAdmin().prepareDeleteSnapshot(repoName, "*").execute(); + deleteFuture = clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*").execute(); deleteAndAbortAll = true; if (randomBoolean()) { awaitNDeletionsInProgress(1); @@ -1490,7 +1508,7 @@ public void testConcurrentRestoreDeleteAndClone() throws Exception { for (int i = 0; i < nbIndices; i++) { if (randomBoolean()) { restoreFutures.add( - clusterAdmin().prepareRestoreSnapshot(repository, "snapshot-" + i) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, "snapshot-" + i) .setIndices("index-" + i) .setRenamePattern("(.+)") .setRenameReplacement("$1-restored-" + i) @@ -1499,7 +1517,9 @@ public void testConcurrentRestoreDeleteAndClone() throws Exception { ); } else { cloneFutures.add( - clusterAdmin().prepareCloneSnapshot(repository, "snapshot-" + i, "clone-" + i).setIndices("index-" + i).execute() + clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repository, "snapshot-" + i, "clone-" + i) + .setIndices("index-" + i) + .execute() ); } } @@ -1555,15 +1575,17 @@ public void testOutOfOrderFinalization() throws Exception { blockNodeWithIndex(repository, index2); - final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-1" + ).setIndices(index1, index2).setWaitForCompletion(true).execute(); awaitNumberOfSnapshotsInProgress(1); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index1) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index1).setWaitForCompletion(true).execute(); 
assertSuccessful(snapshot2); unblockAllDataNodes(repository); final SnapshotInfo sn1 = assertSuccessful(snapshot1); @@ -1571,7 +1593,11 @@ public void testOutOfOrderFinalization() throws Exception { assertAcked(startDeleteSnapshot(repository, sn1.snapshot().getSnapshotId().getName()).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1589,17 +1615,19 @@ public void testOutOfOrderAndConcurrentFinalization() throws Exception { blockNodeWithIndex(repository, index2); - final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-1" + ).setIndices(index1, index2).setWaitForCompletion(true).execute(); awaitNumberOfSnapshotsInProgress(1); blockMasterOnWriteIndexFile(repository); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index1) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index1).setWaitForCompletion(true).execute(); awaitClusterState(state -> { final List snapshotsInProgress = SnapshotsInProgress.get(state).forRepo(repository); @@ -1616,7 +1644,11 @@ public void testOutOfOrderAndConcurrentFinalization() throws Exception { assertAcked(startDeleteSnapshot(repository, sn1.snapshot().getSnapshotId().getName()).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1638,26 +1670,33 @@ public void testOutOfOrderFinalizationWithConcurrentClone() throws Exception { blockNodeWithIndex(repository, index2); final String sn1 = "snapshot-1"; - final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, sn1) + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, sn1) .setIndices(index1, index2) .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); final String targetSnapshot = "target-snapshot"; - final ActionFuture clone = clusterAdmin().prepareCloneSnapshot(repository, sourceSnapshot, targetSnapshot) - .setIndices(index1) - .execute(); + final ActionFuture clone = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + sourceSnapshot, + targetSnapshot + ).setIndices(index1).execute(); assertAcked(clone.get()); unblockAllDataNodes(repository); assertSuccessful(snapshot1); logger.info("--> deleting snapshots [{},{}] from repo [{}]", sn1, sourceSnapshot, repository); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repository).setSnapshots(sn1, sourceSnapshot).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository).setSnapshots(sn1, sourceSnapshot).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots(targetSnapshot).setRepository(repository).get().getSnapshots(), + 
clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots(targetSnapshot) + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1681,6 +1720,7 @@ public void testOutOfOrderCloneFinalization() throws Exception { final String cloneTarget = "target-snapshot"; final ActionFuture cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, sourceSnapshot, cloneTarget @@ -1688,10 +1728,11 @@ public void testOutOfOrderCloneFinalization() throws Exception { awaitNumberOfSnapshotsInProgress(1); waitForBlock(master, repository); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index2).setWaitForCompletion(true).execute(); assertSuccessful(snapshot2); unblockNode(repository, master); @@ -1699,7 +1740,11 @@ public void testOutOfOrderCloneFinalization() throws Exception { assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1726,12 +1771,18 @@ public void testCorrectlyFinalizeOutOfOrderPartialFailures() throws Exception { waitForBlock(dataNode2, repository); unblockNode(repository, dataNode1); - assertAcked(clusterAdmin().prepareCloneSnapshot(repository, "snapshot-1", "target-1").setIndices(index1).get()); + assertAcked( + clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repository, "snapshot-1", "target-1").setIndices(index1).get() + ); unblockNode(repository, dataNode2); snapshotBlocked.get(); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("target-1").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("target-1") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); @@ -1757,6 +1808,7 @@ public void testIndexDeletedWhileSnapshotQueuedAfterClone() throws Exception { final String cloneTarget = "target-snapshot"; final ActionFuture cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, sourceSnapshot, cloneTarget @@ -1764,15 +1816,16 @@ public void testIndexDeletedWhileSnapshotQueuedAfterClone() throws Exception { awaitNumberOfSnapshotsInProgress(1); waitForBlock(master, repository); - final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .setPartial(true) - .execute(); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-3" + ).setIndices(index1, index2).setWaitForCompletion(true).setPartial(true).execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index2).setWaitForCompletion(true).execute(); assertSuccessful(snapshot2); awaitNumberOfSnapshotsInProgress(2); assertFalse(snapshot3.isDone()); @@ -1784,7 +1837,11 @@ 
public void testIndexDeletedWhileSnapshotQueuedAfterClone() throws Exception { assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2", "snapshot-3").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2", "snapshot-3") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(2) ); } @@ -1808,6 +1865,7 @@ public void testIndexDeletedWhileSnapshotAndCloneQueuedAfterClone() throws Excep final String cloneTarget = "target-snapshot"; final ActionFuture cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, sourceSnapshot, cloneTarget @@ -1815,21 +1873,23 @@ public void testIndexDeletedWhileSnapshotAndCloneQueuedAfterClone() throws Excep awaitNumberOfSnapshotsInProgress(1); waitForBlock(master, repository); - final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .setPartial(true) - .execute(); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-3" + ).setIndices(index1, index2).setWaitForCompletion(true).setPartial(true).execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index2).setWaitForCompletion(true).execute(); assertSuccessful(snapshot2); awaitNumberOfSnapshotsInProgress(2); assertFalse(snapshot3.isDone()); final String cloneTarget2 = "target-snapshot-2"; final ActionFuture cloneSnapshot2 = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, sourceSnapshot, cloneTarget2 @@ -1844,7 +1904,11 @@ public void testIndexDeletedWhileSnapshotAndCloneQueuedAfterClone() throws Excep assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2", "snapshot-3").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2", "snapshot-3") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(2) ); } @@ -1874,7 +1938,7 @@ public void testQueuedAfterFailedShardSnapshot() throws Exception { final SnapshotInfo failedSnapshot = snapshotFutureFailure.get().getSnapshotInfo(); assertEquals(SnapshotState.PARTIAL, failedSnapshot.state()); - final SnapshotsStatusResponse snapshotsStatusResponse1 = clusterAdmin().prepareSnapshotStatus(repository) + final SnapshotsStatusResponse snapshotsStatusResponse1 = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repository) .setSnapshots(fullSnapshot) .get(); @@ -1882,14 +1946,14 @@ public void testQueuedAfterFailedShardSnapshot() throws Exception { createFullSnapshot(repository, tmpSnapshot); assertAcked(startDeleteSnapshot(repository, tmpSnapshot).get()); - final SnapshotsStatusResponse snapshotsStatusResponse2 = clusterAdmin().prepareSnapshotStatus(repository) + final SnapshotsStatusResponse snapshotsStatusResponse2 = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repository) .setSnapshots(fullSnapshot) .get(); assertEquals(snapshotsStatusResponse1, snapshotsStatusResponse2); assertAcked(startDeleteSnapshot(repository, 
"successful-snapshot").get()); - final SnapshotsStatusResponse snapshotsStatusResponse3 = clusterAdmin().prepareSnapshotStatus(repository) + final SnapshotsStatusResponse snapshotsStatusResponse3 = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repository) .setSnapshots(fullSnapshot) .get(); assertEquals(snapshotsStatusResponse1, snapshotsStatusResponse3); @@ -1908,26 +1972,33 @@ public void testOutOfOrderFinalizationManySnapshots() throws Exception { blockNodeWithIndex(repository, index2); - final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .execute(); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-1" + ).setIndices(index1, index2).setWaitForCompletion(true).execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index1, index2).setWaitForCompletion(true).execute(); awaitNumberOfSnapshotsInProgress(2); - final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") - .setIndices(index1) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-3" + ).setIndices(index1).setWaitForCompletion(true).execute(); assertSuccessful(snapshot3); unblockAllDataNodes(repository); assertSuccessful(snapshot1); assertSuccessful(snapshot2); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1950,14 +2021,18 @@ public void testCloneQueuedAfterMissingShard() throws Exception { internalCluster().stopNode(dataNodes.get(0)); blockMasterOnWriteIndexFile(repository); - final ActionFuture deleteFuture = clusterAdmin().prepareDeleteSnapshot(repository, snapshotToDelete) - .execute(); + final ActionFuture deleteFuture = clusterAdmin().prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + snapshotToDelete + ).execute(); awaitNDeletionsInProgress(1); final ActionFuture snapshot1 = startFullSnapshot(repository, "snapshot-1", true); awaitNumberOfSnapshotsInProgress(1); final ActionFuture cloneFuture = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, cloneSource, "target-snapshot" @@ -2021,8 +2096,11 @@ public void testSnapshotAndCloneQueuedAfterMissingShard() throws Exception { internalCluster().stopNode(dataNodes.get(0)); blockMasterOnWriteIndexFile(repository); - final ActionFuture deleteFuture = clusterAdmin().prepareDeleteSnapshot(repository, snapshotToDelete) - .execute(); + final ActionFuture deleteFuture = clusterAdmin().prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + snapshotToDelete + ).execute(); awaitNDeletionsInProgress(1); final ActionFuture snapshot1 = startFullSnapshot(repository, "snapshot-1", true); @@ -2032,6 +2110,7 @@ public void testSnapshotAndCloneQueuedAfterMissingShard() throws Exception { awaitNumberOfSnapshotsInProgress(2); final ActionFuture cloneFuture = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, 
repository, cloneSource, "target-snapshot" @@ -2106,7 +2185,7 @@ public void testDeleteIndexWithOutOfOrderFinalization() { final var snapshotCompleters = new HashMap(); for (final var blockingIndex : List.of("index-0", "index-1", "index-2")) { final var snapshotName = "snapshot-with-" + blockingIndex; - final var snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + final var snapshotFuture = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .setPartial(true) .setIndices(indexToDelete, blockingIndex) @@ -2171,14 +2250,21 @@ public void testDeleteIndexWithOutOfOrderFinalization() { } private static void assertSnapshotStatusCountOnRepo(String otherBlockedRepoName, int count) { - final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(otherBlockedRepoName).get(); + final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus( + TEST_REQUEST_TIMEOUT, + otherBlockedRepoName + ).get(); final List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); assertThat(snapshotStatuses, hasSize(count)); } private ActionFuture startDeleteFromNonMasterClient(String repoName, String snapshotName) { logger.info("--> deleting snapshot [{}] from repo [{}] from non master client", snapshotName, repoName); - return internalCluster().nonMasterClient().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(); + return internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .execute(); } private ActionFuture startFullSnapshotFromNonMasterClient(String repoName, String snapshotName) { @@ -2186,7 +2272,7 @@ private ActionFuture startFullSnapshotFromNonMasterClien return internalCluster().nonMasterClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(); } @@ -2196,7 +2282,7 @@ private ActionFuture startFullSnapshotFromDataNode(Strin return internalCluster().dataNodeClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(); } @@ -2206,7 +2292,7 @@ private ActionFuture startFullSnapshotFromMasterClient(S return internalCluster().masterClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(); } @@ -2241,7 +2327,10 @@ private void corruptIndexN(Path repoPath, long generation) throws IOException { } private static List currentSnapshots(String repoName) { - return clusterAdmin().prepareGetSnapshots(repoName).setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT).get().getSnapshots(); + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT) + .get() + .getSnapshots(); } private ActionFuture startAndBlockOnDeleteSnapshot(String repoName, String snapshotName) throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 9eb9041aa51f1..01a18a58f663c 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -70,7 +70,7 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception { final String snapshot = "test-snap"; logger.info("--> creating snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshot) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setIndices("test-idx-1") .get(); @@ -87,12 +87,17 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception { assertRepositoryBlocked(repoName, snapshot); logger.info("--> recreate repository with same settings in order to reset corrupted state"); - assertAcked(clusterAdmin().preparePutRepository(repoName).setType("fs").setSettings(settings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).setType("fs").setSettings(settings) + ); startDeleteSnapshot(repoName, snapshot).get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot)); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).addSnapshots(snapshot) + ); } public void testConcurrentlyChangeRepositoryContents() throws Exception { @@ -118,7 +123,7 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -143,13 +148,13 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { } logger.info("--> remove repository"); - assertAcked(client.admin().cluster().prepareDeleteRepository(repoName)); + assertAcked(client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); logger.info("--> recreate repository"); assertAcked( client.admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType("fs") .setSettings( Settings.builder() @@ -162,7 +167,10 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { startDeleteSnapshot(repoName, snapshot).get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot)); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).addSnapshots(snapshot) + ); } public void testFindDanglingLatestGeneration() throws Exception { @@ -184,7 +192,7 @@ public void testFindDanglingLatestGeneration() throws Exception { final String snapshot = "test-snap"; logger.info("--> creating snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshot) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ 
-229,7 +237,10 @@ public void testFindDanglingLatestGeneration() throws Exception { assertThat(getRepositoryData(repoName).getGenId(), is(beforeMoveGen + 2)); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot)); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).addSnapshots(snapshot) + ); } public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { @@ -252,10 +263,11 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { for (int i = 0; i < snapshots; ++i) { // Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard // generations (the existence of which would short-circuit checks for the repo containing old version snapshots) - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotPrefix + i) - .setIndices() - .setWaitForCompletion(true) - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotPrefix + i + ).setIndices().setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), is(0)); assertThat( createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -354,7 +366,7 @@ public void testMountCorruptedRepositoryData() throws Exception { logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -373,7 +385,7 @@ public void testMountCorruptedRepositoryData() throws Exception { final String otherRepoName = "other-repo"; assertAcked( - clusterAdmin().preparePutRepository(otherRepoName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, otherRepoName) .setType("fs") .setVerify(false) // don't try and load the repo data, since it is corrupt .setSettings(Settings.builder().put("location", repo).put("compress", false)) @@ -452,7 +464,7 @@ public void testRepairBrokenShardGenerations() throws Exception { ); logger.info("--> recreating repository to clear caches"); - clusterAdmin().prepareDeleteRepository(repoName).get(); + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get(); createRepository(repoName, "fs", repoPath); createFullSnapshot(repoName, "snapshot-2"); @@ -501,7 +513,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { } logger.info("--> verifying snapshot state for [{}]", snapshot1); - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshot1)); @@ -510,7 +522,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { assertAcked(indicesAdmin().prepareDelete(indexName)); logger.info("--> restoring snapshot [{}]", snapshot1); - clusterAdmin().prepareRestoreSnapshot("test-repo", snapshot1) + 
clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", snapshot1) .setRestoreGlobalState(randomBoolean()) .setWaitForCompletion(true) .get(); @@ -526,7 +538,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { final String snapshot2 = "test-snap-2"; logger.info("--> creating snapshot [{}]", snapshot2); - final SnapshotInfo snapshotInfo2 = clusterAdmin().prepareCreateSnapshot("test-repo", snapshot2) + final SnapshotInfo snapshotInfo2 = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", snapshot2) .setWaitForCompletion(true) .get() .getSnapshotInfo(); @@ -550,7 +562,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIndices(indices) .get(); @@ -576,7 +588,10 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap-1") + ); for (String index : indices) { assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId()))); @@ -596,7 +611,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -613,7 +628,10 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { startDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap-1") + ); } public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { @@ -636,7 +654,7 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -654,12 +672,15 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { startDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap-1") + ); logger.info("--> make sure that we can create the snapshot again"); createSnapshotResponse = 
client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -702,18 +723,24 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { } } - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); - SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get(); + SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap") + .get(); assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); assertAcked(startDeleteSnapshot("test-repo", "test-snap").get()); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").addSnapshots("test-snap")); - ActionRequestBuilder builder = clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap") + ); + ActionRequestBuilder builder = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") + .addSnapshots("test-snap"); expectThrows(SnapshotMissingException.class, builder); createFullSnapshot("test-repo", "test-snap"); @@ -730,7 +757,10 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") + .setWaitForCompletion(true) + .setIndices("test-idx-*") + .get(); logger.info("--> deleting shard level index file"); final Path indicesPath = repo.resolve("indices"); @@ -745,10 +775,11 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { } logger.info("--> creating another snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") - .setWaitForCompletion(true) - .setIndices("test-idx-1") - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-2" + ).setWaitForCompletion(true).setIndices("test-idx-1").get(); assertEquals( createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards() - 1 @@ -759,9 +790,11 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { + "because it uses snap-*.data files and not the index-N to determine what files to restore" ); indicesAdmin().prepareDelete("test-idx-1", "test-idx-2").get(); - RestoreSnapshotResponse restoreSnapshotResponse = 
clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-1") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-1" + ).setWaitForCompletion(true).get(); assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); } @@ -784,14 +817,14 @@ private void assertRepositoryBlocked(String repo, String existingSnapshot) { logger.info("--> try to delete snapshot"); final RepositoryException ex = expectThrows( RepositoryException.class, - clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot) + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repo, existingSnapshot) ); assertThat(ex.getMessage(), containsString("concurrent modification of the index-N file")); logger.info("--> try to create snapshot"); final RepositoryException ex2 = expectThrows( RepositoryException.class, - clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, existingSnapshot) ); assertThat(ex2.getMessage(), containsString("The repository has been disabled to prevent data corruption")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index ef8ae3cf1cffb..041d722591391 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -52,22 +52,22 @@ public void testShouldNotRestoreRepositoryMetadata() { assertThat(getSnapshot("test-repo-1", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); logger.info("delete repository"); - assertAcked(clusterAdmin().prepareDeleteRepository("test-repo-1")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1")); logger.info("create another repository"); createRepository("test-repo-2", "fs", repoPath); logger.info("restore snapshot"); - clusterAdmin().prepareRestoreSnapshot("test-repo-2", "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo-2", "test-snap") .setRestoreGlobalState(true) .setIndices("-*") .setWaitForCompletion(true) .get(); logger.info("make sure old repository wasn't restored"); - ActionRequestBuilder builder = clusterAdmin().prepareGetRepositories("test-repo-1"); + ActionRequestBuilder builder = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "test-repo-1"); expectThrows(RepositoryMissingException.class, builder); - assertThat(clusterAdmin().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1)); + assertThat(clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "test-repo-2").get().repositories().size(), equalTo(1)); } public void testShouldRestoreOnlySnapshotMetadata() throws Exception { @@ -100,7 +100,7 @@ public void testShouldRestoreOnlySnapshotMetadata() throws Exception { })); logger.info("restore snapshot"); - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snapshot") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snapshot") .setRestoreGlobalState(true) .setIndices("-*") .setWaitForCompletion(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 88c94985194fc..b2b3de51dd04b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -130,7 +130,10 @@ public void testSnapshotDuringNodeShutdown() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); @@ -168,7 +171,10 @@ public void testSnapshotWithStuckNode() throws Exception { assertFileCount(repo, 0); logger.info("--> snapshot"); - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); @@ -178,7 +184,7 @@ public void testSnapshotWithStuckNode() throws Exception { ActionFuture deleteSnapshotResponseFuture = internalCluster().client(nodes.get(0)) .admin() .cluster() - .prepareDeleteSnapshot("test-repo", "test-snap") + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .execute(); // Make sure that abort makes some progress Thread.sleep(100); @@ -194,10 +200,13 @@ public void testSnapshotWithStuckNode() throws Exception { } logger.info("--> making sure that snapshot no longer exists"); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap")); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("test-snap") + ); logger.info("--> trigger repository cleanup"); - clusterAdmin().prepareCleanupRepository("test-repo").get(); + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").get(); // Expect two or three files to remain in the repository: // (1) index-latest @@ -266,7 +275,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> start snapshot with default settings without a closed index - should fail"); final SnapshotException sne = expectThrows( SnapshotException.class, - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) ); @@ -274,13 +283,13 @@ public void testRestoreIndexWithMissingShards() throws Exception { if (randomBoolean()) { logger.info("checking snapshot completion using status"); - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(false) .setPartial(true) .get(); assertBusy(() -> { - SnapshotsStatusResponse 
snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap-2") .get(); List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); @@ -288,7 +297,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.trace("current snapshot status [{}]", snapshotStatuses.get(0)); assertTrue(snapshotStatuses.get(0).getState().completed()); }, 1, TimeUnit.MINUTES); - SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap-2") .get(); List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); @@ -308,7 +317,11 @@ public void testRestoreIndexWithMissingShards() throws Exception { }, 1, TimeUnit.MINUTES); } else { logger.info("checking snapshot completion using wait_for_completion flag"); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-2" + ) .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) .setPartial(true) @@ -328,7 +341,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> restore incomplete snapshot - should fail"); assertFutureThrows( - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setRestoreGlobalState(false) .setWaitForCompletion(true) .execute(), @@ -336,11 +349,11 @@ public void testRestoreIndexWithMissingShards() throws Exception { ); logger.info("--> restore snapshot for the index that was snapshotted completely"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") - .setRestoreGlobalState(false) - .setIndices("test-idx-all") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-2" + ).setRestoreGlobalState(false).setIndices("test-idx-all").setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6)); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6)); @@ -349,7 +362,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> restore snapshot for the partial index"); cluster().wipeIndices("test-idx-some"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setRestoreGlobalState(false) .setIndices("test-idx-some") .setPartial(true) @@ -363,7 +376,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> restore snapshot for the index that didn't have any shards snapshotted successfully"); cluster().wipeIndices("test-idx-none"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") + 
restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setRestoreGlobalState(false) .setIndices("test-idx-none") .setPartial(true) @@ -376,7 +389,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L))); logger.info("--> restore snapshot for the closed index that was snapshotted completely"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setRestoreGlobalState(false) .setIndices("test-idx-closed") .setWaitForCompletion(true) @@ -429,7 +442,7 @@ public boolean clearData(String nodeName) { logger.info("--> restore index snapshot"); assertThat( - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-1") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setRestoreGlobalState(false) .setWaitForCompletion(true) .get() @@ -460,14 +473,14 @@ public void testRegistrationFailure() { internalCluster().startNode(nonMasterNode()); // Register mock repositories for (int i = 0; i < 5; i++) { - clusterAdmin().preparePutRepository("test-repo" + i) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo" + i) .setType("mock") .setSettings(Settings.builder().put("location", randomRepoPath())) .setVerify(false) .get(); } logger.info("--> make sure that properly setup repository can be registered on all nodes"); - clusterAdmin().preparePutRepository("test-repo-0") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-0") .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath())) .get(); @@ -557,7 +570,7 @@ public void testMasterShutdownDuringSnapshot() throws Exception { dataNodeClient().admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -598,7 +611,7 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception { dataNodeClient().admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -663,7 +676,7 @@ public void testRestoreShrinkIndex() throws Exception { clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2"); logger.info("--> restore the shrunk index and ensure all shards are allocated"); - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repo, snapshot) + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) .setWaitForCompletion(true) .setIndices(shrunkIdx) .get(); @@ -678,7 +691,7 @@ public void testSnapshotWithDateMath() { logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository(repo) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repo) .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); @@ -689,7 +702,7 @@ public void testSnapshotWithDateMath() { // snapshot could be taken before or after a day rollover final String expression2 = 
IndexNameExpressionResolver.resolveDateMathExpression(snapshotName); - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo) + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repo) .setSnapshots(Sets.newHashSet(expression1, expression2).toArray(Strings.EMPTY_ARRAY)) .setIgnoreUnavailable(true) .get(); @@ -715,7 +728,9 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { createRepository(repositoryName, "fs", repoPath); createFullSnapshot(repositoryName, snapshot0); - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repositoryName).setSnapshots(snapshot0).get(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName) + .setSnapshots(snapshot0) + .get(); List snapshots = response.getSnapshots(); @@ -751,7 +766,7 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { // drop 1st one to avoid miscalculation as snapshot reuses some files of prev snapshot assertAcked(startDeleteSnapshot(repositoryName, snapshot0).get()); - response = clusterAdmin().prepareSnapshotStatus(repositoryName).setSnapshots(snapshot1).get(); + response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName).setSnapshots(snapshot1).get(); final List snapshot1Files = scanSnapshotFolder(repoPath); final List snapshot1IndexMetaFiles = findRepoMetaBlobs(repoPath); @@ -829,7 +844,7 @@ public void testDeduplicateIndexMetadata() throws Exception { final List snapshot2IndexMetaFiles = findRepoMetaBlobs(repoPath); assertThat(snapshot2IndexMetaFiles, hasSize(2)); // should have created one new metadata blob - assertAcked(clusterAdmin().prepareDeleteSnapshot(repositoryName, snapshot0, snapshot1).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot0, snapshot1).get()); final List snapshot3IndexMetaFiles = findRepoMetaBlobs(repoPath); assertThat(snapshot3IndexMetaFiles, hasSize(1)); // should have deleted the metadata blob referenced by the first two snapshots } @@ -852,7 +867,7 @@ public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception { setDisruptionScheme(disruption); client(internalCluster().getMasterName()).admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -869,7 +884,7 @@ public boolean validateClusterForming() { logger.info("--> wait for shard snapshots to show as failed"); assertBusy( () -> assertThat( - clusterAdmin().prepareSnapshotStatus("test-repo") + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots() @@ -886,7 +901,7 @@ public boolean validateClusterForming() { disruption.stopDisrupting(); // check that snapshot completes assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots("test-repo") + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .setIgnoreUnavailable(true) .get(); @@ -912,7 +927,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { logger.info("--> snapshot"); client(internalCluster().getMasterName()).admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") 
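// Note that the same overloads apply regardless of which client issues the
// request: dataNodeClient(), masterClient(), nonMasterClient(), and
// client(nodeName) all route through identical builder signatures, so the
// timeout argument is threaded through every variant in these tests.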
.setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -922,7 +937,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { logger.info("--> wait for shard snapshot of first primary to show as failed"); assertBusy( () -> assertThat( - clusterAdmin().prepareSnapshotStatus("test-repo") + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots() @@ -940,7 +955,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { // check that snapshot completes with both failed shards being accounted for in the snapshot result assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots("test-repo") + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .setIgnoreUnavailable(true) .get(); @@ -1000,7 +1015,7 @@ public void testRetentionLeasesClearedOnRestore() throws Exception { assertAcked(indicesAdmin().prepareClose(indexName)); logger.debug("--> restore index {} from snapshot", indexName); - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .get(); assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(shardCount)); @@ -1117,7 +1132,7 @@ public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { ); logger.info("--> snapshot"); - clusterAdmin().prepareCreateSnapshot(repoName, "test-snap") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap") .setWaitForCompletion(false) .setPartial(true) .setIndices(indexName) @@ -1184,21 +1199,21 @@ public void testDeleteIndexDuringSnapshot() throws Exception { } logger.info("--> restore snapshot 1"); - clusterAdmin().prepareRestoreSnapshot(repoName, firstSnapshotName).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, firstSnapshotName).get(); ensureGreen(indexName); } // create and delete a snapshot of the given name and for the given single index in a loop until the index is removed from the cluster // at which point doneListener is resolved private void startSnapshotDeleteLoop(String repoName, String indexName, String snapshotName, ActionListener<Void> doneListener) { - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .setPartial(true) .setIndices(indexName) .execute(new ActionListener<>() { @Override public void onResponse(CreateSnapshotResponse createSnapshotResponse) { - clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName) + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(ActionTestUtils.assertNoFailureListener(acknowledgedResponse -> { assertAcked(acknowledgedResponse); startSnapshotDeleteLoop(repoName, indexName, snapshotName, doneListener); @@ -1215,7 +1230,7 @@ public void onFailure(Exception e) { public void testGetReposWithWildcard() { internalCluster().startMasterOnlyNode(); - List<RepositoryMetadata> repositoryMetadata = clusterAdmin().prepareGetRepositories("*").get().repositories(); + List<RepositoryMetadata> repositoryMetadata = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "*").get().repositories(); assertThat(repositoryMetadata, empty()); } @@ -1231,9 +1246,13 @@ public void testConcurrentSnapshotAndRepoDelete() throws Exception { // concurrently trigger repository and snapshot deletes final List<ActionFuture<AcknowledgedResponse>> deleteFutures = new ArrayList<>(snapshotCount); - final ActionFuture<AcknowledgedResponse> deleteRepoFuture = clusterAdmin().prepareDeleteRepository(repoName).execute(); + final ActionFuture<AcknowledgedResponse> deleteRepoFuture = clusterAdmin().prepareDeleteRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + repoName + ).execute(); for (String snapshotName : snapshotNames) { - deleteFutures.add(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).execute()); + deleteFutures.add(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).execute()); } try { @@ -1278,7 +1297,7 @@ public void testDeleteSnapshotsOfDifferentIndexSets() throws IllegalAccessExcept createSnapshot(repoName, snapshot1, List.of(index1)); createSnapshot(repoName, snapshot2, List.of(index2)); - clusterAdmin().prepareDeleteSnapshot(repoName, snapshot1, snapshot2).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot1, snapshot2).get(); mockLog.assertAllExpectationsMatched(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java index c3dbfd03cae38..64c168ae73905 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java @@ -70,7 +70,10 @@ public void testResetSystemIndices() throws Exception { refresh("my_index"); // call the reset API - ResetFeatureStateResponse apiResponse = client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).get(); + ResetFeatureStateResponse apiResponse = client().execute( + ResetFeatureStateAction.INSTANCE, + new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT) + ).get(); assertThat( apiResponse.getFeatureStateResetStatuses(), containsInAnyOrder( @@ -105,7 +108,7 @@ public void testFeatureResetFailure() throws Exception { EvilSystemIndexTestPlugin.setBeEvil(true); ResetFeatureStateResponse resetFeatureStateResponse = client().execute( ResetFeatureStateAction.INSTANCE, - new ResetFeatureStateRequest() + new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT) ).get(); List<String> failedFeatures = resetFeatureStateResponse.getFeatureStateResetStatuses() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index a04d1a5c8b02d..7c5f38fee02a9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -82,7 +82,10 @@ public void testSortBy() throws Exception { } private void doTestSortOrder(String repoName, Collection<String> allSnapshotNames, SortOrder order) { - final List<SnapshotInfo> defaultSorting = clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(); + final List<SnapshotInfo> defaultSorting = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) + .setOrder(order) + .get() + .getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); final String[] repos = { repoName }; assertSnapshotListSorted(allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.NAME, order), SnapshotSortKey.NAME, order); @@ -187,7 +190,7 @@ public void
testSortAndPaginateWithInProgress() throws Exception { assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.START_TIME); assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.NAME); assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.INDICES); - final List<SnapshotInfo> currentSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List<SnapshotInfo> currentSnapshots = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT) .get() .getSnapshots(); @@ -196,7 +199,7 @@ public void testSortAndPaginateWithInProgress() throws Exception { } assertThat( - clusterAdmin().prepareGetSnapshots(matchAllPattern()) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT, "-snap*") .get() .getSnapshots(), @@ -219,14 +222,14 @@ public void testPaginationRequiresVerboseListing() throws Exception { createNSnapshots(repoName, randomIntBetween(1, 5)); expectThrows( ActionRequestValidationException.class, - clusterAdmin().prepareGetSnapshots(repoName) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) .setVerbose(false) .setSort(SnapshotSortKey.DURATION) .setSize(GetSnapshotsRequest.NO_LIMIT) ); expectThrows( ActionRequestValidationException.class, - clusterAdmin().prepareGetSnapshots(repoName) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) .setVerbose(false) .setSort(SnapshotSortKey.START_TIME) .setSize(randomIntBetween(1, 100)) @@ -293,14 +296,23 @@ public void testExcludePatterns() throws Exception { ); assertThat(allInOtherWithoutOtherExplicit, is(allInOther)); - assertThat(clusterAdmin().prepareGetSnapshots(matchAllPattern()).setSnapshots("other*", "-o*").get().getSnapshots(), empty()); - assertThat(clusterAdmin().prepareGetSnapshots("other*", "-o*").setSnapshots(matchAllPattern()).get().getSnapshots(), empty()); assertThat( - clusterAdmin().prepareGetSnapshots("other*", otherRepo, "-o*").setSnapshots(matchAllPattern()).get().getSnapshots(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()).setSnapshots("other*", "-o*").get().getSnapshots(), + empty() + ); + assertThat( + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "other*", "-o*").setSnapshots(matchAllPattern()).get().getSnapshots(), + empty() + ); + assertThat( + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "other*", otherRepo, "-o*") + .setSnapshots(matchAllPattern()) + .get() + .getSnapshots(), empty() ); assertThat( - clusterAdmin().prepareGetSnapshots(matchAllPattern()) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots("non-existing*", otherPrefixSnapshot1, "-o*") .get() .getSnapshots(), @@ -332,7 +344,7 @@ public void testNamesStartingInDash() { final SnapshotInfo weirdSnapshot1InWeird2 = createFullSnapshot(weirdRepo2, weirdSnapshot1); final SnapshotInfo weirdSnapshot2InWeird2 = createFullSnapshot(weirdRepo2, weirdSnapshot2); - final List<SnapshotInfo> allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List<SnapshotInfo> allSnapshots = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSort(SnapshotSortKey.REPOSITORY) .get() .getSnapshots(); @@ -395,14 +407,18 @@ public void testNamesStartingInDash() { } private List<SnapshotInfo> getAllByPatterns(String[] repos, String[] snapshots) { - return clusterAdmin().prepareGetSnapshots(repos).setSnapshots(snapshots).setSort(SnapshotSortKey.REPOSITORY).get().getSnapshots(); + return
clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repos) + .setSnapshots(snapshots) + .setSort(SnapshotSortKey.REPOSITORY) + .get() + .getSnapshots(); } public void testFilterBySLMPolicy() throws Exception { final String repoName = "test-repo"; createRepository(repoName, "fs"); createNSnapshots(repoName, randomIntBetween(1, 5)); - final List<SnapshotInfo> snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List<SnapshotInfo> snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.NAME) .get() @@ -410,7 +426,7 @@ public void testFilterBySLMPolicy() throws Exception { final String snapshotWithPolicy = "snapshot-with-policy"; final String policyName = "some-policy"; final SnapshotInfo withPolicy = assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotWithPolicy) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotWithPolicy) .setUserMetadata(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, policyName)) .setWaitForCompletion(true) .execute() @@ -429,7 +445,7 @@ public void testFilterBySLMPolicy() throws Exception { final String snapshotWithOtherPolicy = "snapshot-with-other-policy"; final String otherPolicyName = "other-policy"; final SnapshotInfo withOtherPolicy = assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotWithOtherPolicy) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotWithOtherPolicy) .setUserMetadata(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, otherPolicyName)) .setWaitForCompletion(true) .execute() @@ -438,7 +454,7 @@ assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName), is(List.of(withOtherPolicy, withPolicy))); assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName, "no-such-policy*"), is(List.of(withOtherPolicy, withPolicy))); - final List<SnapshotInfo> allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List<SnapshotInfo> allSnapshots = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.NAME) .get() @@ -459,7 +475,7 @@ public void testSortAfter() throws Exception { final SnapshotInfo snapshot3 = createFullSnapshotWithUniqueTimestamps(repoName, "snapshot-3", startTimes, durations); createIndexWithContent("index-3"); - final List<SnapshotInfo> allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List<SnapshotInfo> allSnapshotInfo = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.START_TIME) .get() @@ -486,7 +502,7 @@ assertThat(allAfterNameAscending(name3), is(List.of(snapshot3))); assertThat(allAfterNameAscending("z"), empty()); - final List<SnapshotInfo> allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List<SnapshotInfo> allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.START_TIME) .setOrder(SortOrder.DESC) @@ -507,7 +523,7 @@ assertThat(allBeforeNameDescending(name1), is(List.of(snapshot1))); assertThat(allBeforeNameDescending("a"), empty()); - final List<SnapshotInfo> allSnapshotInfoByDuration = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List<SnapshotInfo> allSnapshotInfoByDuration =
clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.DURATION) .get() @@ -523,7 +539,7 @@ public void testSortAfter() throws Exception { assertThat(allAfterDurationAscending(duration3), is(List.of(allSnapshotInfoByDuration.get(2)))); assertThat(allAfterDurationAscending(duration3 + 1), empty()); - final List<SnapshotInfo> allSnapshotInfoByDurationDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List<SnapshotInfo> allSnapshotInfoByDurationDesc = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.DURATION) .setOrder(SortOrder.DESC) @@ -541,7 +557,7 @@ assertThat(allSnapshots(new String[] { "snap*" }, SnapshotSortKey.NAME, SortOrder.ASC, "a"), is(allSnapshotInfo)); assertThat(allSnapshots(new String[] { "o*" }, SnapshotSortKey.NAME, SortOrder.ASC, "a"), is(List.of(otherSnapshot))); - final GetSnapshotsResponse paginatedResponse = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final GetSnapshotsResponse paginatedResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots("snap*") .setSort(SnapshotSortKey.NAME) .setFromSortValue("a") @@ -550,7 +566,7 @@ .get(); assertThat(paginatedResponse.getSnapshots(), is(List.of(snapshot2))); assertThat(paginatedResponse.totalCount(), is(3)); - final GetSnapshotsResponse paginatedResponse2 = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final GetSnapshotsResponse paginatedResponse2 = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots("snap*") .setSort(SnapshotSortKey.NAME) .setFromSortValue("a") @@ -570,7 +586,7 @@ public void testRetrievingSnapshotsWhenRepositoryIsMissing() throws Exception { final List<String> snapshotNames = createNSnapshots(repoName, randomIntBetween(1, 10)); snapshotNames.sort(String::compareTo); - final GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(repoName, missingRepoName) + final GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName, missingRepoName) .setSort(SnapshotSortKey.NAME) .get(); assertThat(response.getSnapshots().stream().map(info -> info.snapshotId().getName()).toList(), equalTo(snapshotNames)); @@ -626,7 +642,7 @@ private List<SnapshotInfo> allBeforeDurationDescending(long duration) { } private static List<SnapshotInfo> allSnapshots(String[] snapshotNames, SnapshotSortKey sortBy, SortOrder order, Object fromSortValue) { - return clusterAdmin().prepareGetSnapshots(matchAllPattern()) + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(snapshotNames) .setSort(sortBy) .setFromSortValue(fromSortValue.toString()) @@ -636,7 +652,7 @@ } private static List<SnapshotInfo> getAllSnapshotsForPolicies(String...
policies) { - return clusterAdmin().prepareGetSnapshots(matchAllPattern()) + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setPolicies(policies) .setSort(SnapshotSortKey.NAME) @@ -722,7 +738,7 @@ private static GetSnapshotsResponse sortedWithLimit(String[] repoNames, Snapshot } private static GetSnapshotsRequestBuilder baseGetSnapshotsRequest(String[] repoNames) { - return clusterAdmin().prepareGetSnapshots(repoNames) + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoNames) .setSnapshots("*", "-" + AbstractSnapshotIntegTestCase.OLD_VERSION_SNAPSHOT_PREFIX + "*"); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index 0aa3475de7be1..8ab1ddd85fc55 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -75,7 +75,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertIndexMetadataLoads("snap", "others", 0); // Getting a snapshot does not load any metadata - GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots("repository") + GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "repository") .addSnapshots("snap") .setVerbose(randomBoolean()) .get(); @@ -85,7 +85,9 @@ public void testWhenMetadataAreLoaded() throws Exception { assertIndexMetadataLoads("snap", "others", 0); // Getting the status of a snapshot loads indices metadata but not global metadata - SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus("repository").setSnapshots("snap").get(); + SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "repository") + .setSnapshots("snap") + .get(); assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); assertGlobalMetadataLoads("snap", 0); assertIndexMetadataLoads("snap", "docs", 1); @@ -94,7 +96,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertAcked(indicesAdmin().prepareDelete("docs", "others")); // Restoring a snapshot loads indices metadata but not the global state - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repository", "snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -105,7 +107,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertAcked(indicesAdmin().prepareDelete("docs")); // Restoring a snapshot with selective indices loads only required index metadata - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repository", "snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snap") .setIndices("docs") .setWaitForCompletion(true) .get(); @@ -117,7 +119,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertAcked(indicesAdmin().prepareDelete("docs", "others")); // Restoring a snapshot including the global state loads it with the index metadata - restoreSnapshotResponse = 
clusterAdmin().prepareRestoreSnapshot("repository", "snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snap") .setIndices("docs", "oth*") .setRestoreGlobalState(true) .setWaitForCompletion(true) @@ -128,7 +130,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertIndexMetadataLoads("snap", "others", 3); // Deleting a snapshot does not load the global metadata state but loads each index metadata - assertAcked(clusterAdmin().prepareDeleteSnapshot("repository", "snap").get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snap").get()); assertGlobalMetadataLoads("snap", 1); assertIndexMetadataLoads("snap", "docs", 4); assertIndexMetadataLoads("snap", "others", 3); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index 8fc6e9e2aa3d8..fc727007724de 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -118,16 +118,16 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { secondCluster.client() .admin() .cluster() - .preparePutRepository(repoNameOnSecondCluster) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster) .setType("fs") .setSettings(Settings.builder().put("location", repoPath)) .get(); - secondCluster.client().admin().cluster().prepareDeleteSnapshot(repoNameOnSecondCluster, "snap-1").get(); - secondCluster.client().admin().cluster().prepareDeleteSnapshot(repoNameOnSecondCluster, "snap-2").get(); + secondCluster.client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster, "snap-1").get(); + secondCluster.client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster, "snap-2").get(); final SnapshotException sne = expectThrows( SnapshotException.class, - clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) ); assertThat(sne.getMessage(), containsString("failed to update snapshot in repository")); final RepositoryException cause = (RepositoryException) sne.getCause(); @@ -142,7 +142,7 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { + "] at generation [4]." 
) ); - assertAcked(clusterAdmin().prepareDeleteRepository(repoNameOnFirstCluster).get()); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoNameOnFirstCluster).get()); createRepository(repoNameOnFirstCluster, "fs", repoPath); createFullSnapshot(repoNameOnFirstCluster, "snap-5"); } @@ -155,7 +155,7 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx createIndexWithRandomDocs("test-idx-1", randomIntBetween(1, 100)); createFullSnapshot(repoName, "snap-1"); - final String repoUuid = clusterAdmin().prepareGetRepositories(repoName) + final String repoUuid = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() @@ -170,7 +170,7 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx secondCluster.client() .admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType("fs") .setSettings(Settings.builder().put("location", repoPath).put(READONLY_SETTING_KEY, true)) ); @@ -178,7 +178,7 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx secondCluster.client() .admin() .cluster() - .prepareGetRepositories(repoName) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() @@ -189,12 +189,12 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx equalTo(repoUuid) ); - assertAcked(clusterAdmin().prepareDeleteRepository(repoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); IOUtils.rm(internalCluster().getCurrentMasterNodeInstance(Environment.class).resolveRepoFile(repoPath.toString())); createRepository(repoName, "fs", repoPath); createFullSnapshot(repoName, "snap-1"); - final String newRepoUuid = clusterAdmin().prepareGetRepositories(repoName) + final String newRepoUuid = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() @@ -204,12 +204,13 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx .uuid(); assertThat(newRepoUuid, not(equalTo((repoUuid)))); - secondCluster.client().admin().cluster().prepareGetSnapshots(repoName).get(); // force another read of the repo data + secondCluster.client().admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get(); // force another read of the + // repo data assertThat( secondCluster.client() .admin() .cluster() - .prepareGetRepositories(repoName) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 80ded243d3fb2..a96d127429b75 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -65,7 +65,10 @@ public void testRepositoryCreation() throws Exception { logger.info("--> verify the repository"); int numberOfFiles = FileSystemUtils.files(location).length; - VerifyRepositoryResponse verifyRepositoryResponse = client.admin().cluster().prepareVerifyRepository("test-repo-1").get(); + VerifyRepositoryResponse verifyRepositoryResponse = client.admin() + .cluster() + .prepareVerifyRepository(TEST_REQUEST_TIMEOUT, 
TEST_REQUEST_TIMEOUT, "test-repo-1") + .get(); assertThat(verifyRepositoryResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); logger.info("--> verify that we didn't leave any files as a result of verification"); @@ -96,7 +99,7 @@ public void testRepositoryCreation() throws Exception { logger.info("--> check that both repositories can be retrieved by getRepositories query"); GetRepositoriesResponse repositoriesResponse = client.admin() .cluster() - .prepareGetRepositories(randomFrom("_all", "*", "test-repo-*")) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, randomFrom("_all", "*", "test-repo-*")) .get(); assertThat(repositoriesResponse.repositories().size(), equalTo(2)); assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue()); @@ -107,7 +110,7 @@ public void testRepositoryCreation() throws Exception { assertThat( client.admin() .cluster() - .preparePutRepository("test-repo-1") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1") .setType("fs") .setSettings(Settings.builder().put("location", location)) .get() @@ -117,14 +120,14 @@ public void testRepositoryCreation() throws Exception { assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); logger.info("--> delete repository test-repo-1"); - client.admin().cluster().prepareDeleteRepository("test-repo-1").get(); - repositoriesResponse = client.admin().cluster().prepareGetRepositories().get(); + client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1").get(); + repositoriesResponse = client.admin().cluster().prepareGetRepositories(TEST_REQUEST_TIMEOUT).get(); assertThat(repositoriesResponse.repositories().size(), equalTo(1)); assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue()); logger.info("--> delete repository test-repo-2"); - client.admin().cluster().prepareDeleteRepository("test-repo-2").get(); - repositoriesResponse = client.admin().cluster().prepareGetRepositories().get(); + client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2").get(); + repositoriesResponse = client.admin().cluster().prepareGetRepositories(TEST_REQUEST_TIMEOUT).get(); assertThat(repositoriesResponse.repositories().size(), equalTo(0)); } @@ -142,7 +145,7 @@ public void testMisconfiguredRepository() { logger.info("--> trying creating repository with incorrect settings"); try { - client.admin().cluster().preparePutRepository("test-repo").setType("fs").get(); + client.admin().cluster().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").setType("fs").get(); fail("Shouldn't be here"); } catch (RepositoryException ex) { assertThat(ex.getCause().getMessage(), equalTo("[test-repo] missing location")); @@ -152,7 +155,10 @@ public void testMisconfiguredRepository() { Path invalidRepoPath = createTempDir().toAbsolutePath(); String location = invalidRepoPath.toString(); try { - clusterAdmin().preparePutRepository("test-repo").setType("fs").setSettings(Settings.builder().put("location", location)).get(); + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") + .setType("fs") + .setSettings(Settings.builder().put("location", location)) + .get(); fail("Shouldn't be here"); } catch (RepositoryException ex) { assertThat( @@ -164,7 +170,11 @@ public void testMisconfiguredRepository() { public void testRepositoryAckTimeout() { 
logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack"); - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo-1") + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "test-repo-1" + ) .setType("fs") .setSettings( Settings.builder() @@ -177,7 +187,7 @@ public void testRepositoryAckTimeout() { assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false)); logger.info("--> creating repository test-repo-2 with standard timeout - should ack"); - putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo-2") + putRepositoryResponse = clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2") .setType("fs") .setSettings( Settings.builder() @@ -189,13 +199,15 @@ public void testRepositoryAckTimeout() { assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack"); - AcknowledgedResponse deleteRepositoryResponse = clusterAdmin().prepareDeleteRepository("test-repo-2") - .setTimeout(TimeValue.ZERO) - .get(); + AcknowledgedResponse deleteRepositoryResponse = clusterAdmin().prepareDeleteRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "test-repo-2" + ).setTimeout(TimeValue.ZERO).get(); assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(false)); logger.info("--> deleting repository test-repo-1 with standard timeout - should ack"); - deleteRepositoryResponse = clusterAdmin().prepareDeleteRepository("test-repo-1").get(); + deleteRepositoryResponse = clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1").get(); assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(true)); } @@ -209,7 +221,7 @@ public void testRepositoryVerification() { logger.info("--> creating repository that cannot write any files - should fail"); ActionRequestBuilder builder3 = client.admin() .cluster() - .preparePutRepository("test-repo-1") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1") .setType("mock") .setSettings(settings); expectThrows(RepositoryVerificationException.class, builder3); @@ -217,25 +229,41 @@ public void testRepositoryVerification() { logger.info("--> creating read-only repository that cannot read any files - should fail"); ActionRequestBuilder builder2 = client.admin() .cluster() - .preparePutRepository("test-repo-2") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2") .setType("mock") .setSettings(readonlySettings); expectThrows(RepositoryVerificationException.class, builder2); logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings).setVerify(false)); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1") + .setType("mock") + .setSettings(settings) + .setVerify(false) + ); logger.info("--> verifying repository"); - ActionRequestBuilder builder1 = client.admin().cluster().prepareVerifyRepository("test-repo-1"); + ActionRequestBuilder builder1 = client.admin() + .cluster() + .prepareVerifyRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1"); expectThrows(RepositoryVerificationException.class, builder1); logger.info("--> creating read-only 
repository that cannot read any files, but suppress verification - should be acked"); assertAcked( - client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(readonlySettings).setVerify(false) + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2") + .setType("mock") + .setSettings(readonlySettings) + .setVerify(false) ); logger.info("--> verifying repository"); - ActionRequestBuilder builder = client.admin().cluster().prepareVerifyRepository("test-repo-2"); + ActionRequestBuilder builder = client.admin() + .cluster() + .prepareVerifyRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2"); expectThrows(RepositoryVerificationException.class, builder); Path location = randomRepoPath(); @@ -244,7 +272,7 @@ public void testRepositoryVerification() { try { client.admin() .cluster() - .preparePutRepository("test-repo-1") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1") .setType("mock") .setSettings(Settings.builder().put("location", location).put("localize_location", true)) .get(); @@ -258,7 +286,7 @@ public void testRepositoryConflict() throws Exception { logger.info("--> creating repository"); final String repo = "test-repo"; assertAcked( - clusterAdmin().preparePutRepository(repo) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repo) .setType("mock") .setSettings( Settings.builder() @@ -276,11 +304,11 @@ public void testRepositoryConflict() throws Exception { } refresh(); final String snapshot1 = "test-snap1"; - clusterAdmin().prepareCreateSnapshot(repo, snapshot1).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot1).setWaitForCompletion(true).get(); String blockedNode = internalCluster().getMasterName(); blockMasterOnWriteIndexFile(repo); logger.info("--> start deletion of snapshot"); - ActionFuture future = clusterAdmin().prepareDeleteSnapshot(repo, snapshot1).execute(); + ActionFuture future = clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot1).execute(); logger.info("--> waiting for block to kick in on node [{}]", blockedNode); waitForBlock(blockedNode, repo); @@ -295,14 +323,17 @@ public void testRepositoryConflict() throws Exception { ); logger.info("--> try deleting the repository, should fail because the deletion of the snapshot is in progress"); - RepositoryConflictException e1 = expectThrows(RepositoryConflictException.class, clusterAdmin().prepareDeleteRepository(repo)); + RepositoryConflictException e1 = expectThrows( + RepositoryConflictException.class, + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repo) + ); assertThat(e1.status(), equalTo(RestStatus.CONFLICT)); assertThat(e1.getMessage(), containsString("trying to modify or unregister repository that is currently used")); logger.info("--> try updating the repository, should fail because the deletion of the snapshot is in progress"); RepositoryConflictException e2 = expectThrows( RepositoryConflictException.class, - clusterAdmin().preparePutRepository(repo) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repo) // if "true" will deadlock on snapshot thread pool, we are running with single thread which is busy at the moment .setVerify(false) .setType("mock") @@ -357,14 +388,14 @@ public void testLeakedStaleIndicesAreDeletedBySubsequentDelete() throws Exceptio repository.setFailOnDeleteContainer(true); 
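// ---------------------------------------------------------------------------------------------
// Editor's note, not part of the original diff: in these hunks, read-only lookups take a single
// master-node timeout while acknowledged mutations take two. A sketch of both shapes under that
// assumption; "sketch-repo" is a hypothetical name.
List<RepositoryMetadata> repos = clusterAdmin()
    .prepareGetRepositories(TEST_REQUEST_TIMEOUT, "sketch-repo") // read-only: one timeout
    .get()
    .repositories();
assertAcked(
    clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "sketch-repo") // mutation: two timeouts
);
// ---------------------------------------------------------------------------------------------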
logger.info("--> delete the second snapshot"); - client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshot2Name).get(); + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot2Name).get(); // Make repository work normally repository.setFailOnDeleteContainer(false); // This snapshot should delete last snapshot's residual stale indices as well logger.info("--> delete snapshot one"); - client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshot1Name).get(); + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot1Name).get(); logger.info("--> check no leftover files"); assertFileCount(repositoryPath, 2); // just the index-N and index.latest blobs @@ -443,7 +474,7 @@ public void run() { clusterService.addListener(clusterStateListener); final var deleteFuture = new PlainActionFuture(); - client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshotName).execute(deleteFuture); + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName).execute(deleteFuture); safeAwait(barrier); // wait for all the snapshot threads to be blocked clusterService.removeListener(clusterStateListener); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java index d8bc9327a2edd..7664bbd6c91ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java @@ -57,11 +57,11 @@ public void testRepositoryThrottlingStats() throws Exception { createSnapshot("test-repo", "test-snap", Collections.singletonList("test-idx")); logger.info("--> restore from snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setRenamePattern("test-") - .setRenameReplacement("test2-") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setRenamePattern("test-").setRenameReplacement("test2-").setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index 7aa1603735afe..7626e59cd1b9d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -91,14 +91,14 @@ public void testParallelRestoreOperations() { RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName1) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName1) .setWaitForCompletion(false) .setRenamePattern(indexName1) .setRenameReplacement(restoredIndexName1) .get(); RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName2) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName2) .setWaitForCompletion(false) .setRenamePattern(indexName2) 
.setRenameReplacement(restoredIndexName2) @@ -135,7 +135,7 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { ActionFuture restoreSnapshotResponse1 = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIndices(indexName1) .setRenamePattern(indexName1) .setRenameReplacement(restoredIndexName1) @@ -145,7 +145,7 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { ActionFuture restoreSnapshotResponse2 = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIndices(sameSourceIndex ? indexName1 : indexName2) .setRenamePattern(sameSourceIndex ? indexName1 : indexName2) .setRenameReplacement(restoredIndexName2) @@ -198,7 +198,7 @@ public void testRestoreLogging() throws IllegalAccessException { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(false) .setRenamePattern(indexName) .setRenameReplacement(restoredIndexName) @@ -243,9 +243,11 @@ public void testRestoreIncreasesPrimaryTerms() { assertAcked(indicesAdmin().prepareClose(indexName)); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .get(); + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(numPrimaries)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -297,9 +299,11 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { indicesAdmin().prepareClose("test-idx").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that old mapping is restored"); @@ -338,10 +342,11 @@ public void testRestoreAliases() throws Exception { assertTrue(indicesAdmin().prepareGetAliases("alias-1").get().getAliases().isEmpty()); logger.info("--> restore snapshot with aliases"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); // We don't restore any indices here assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -363,7 +368,7 @@ public void testRestoreAliases() throws Exception { assertTrue(indicesAdmin().prepareGetAliases("alias-1").get().getAliases().isEmpty()); logger.info("--> restore snapshot without aliases"); - restoreSnapshotResponse = 
clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(true) .setIncludeAliases(false) @@ -415,10 +420,11 @@ public void testRestoreTemplates() throws Exception { assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); @@ -452,7 +458,7 @@ public void testRenameOnRestore() throws Exception { logger.info("--> restore indices with different names"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) @@ -468,7 +474,7 @@ public void testRenameOnRestore() throws Exception { logger.info("--> and try to restore these indices again"); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) @@ -484,7 +490,7 @@ public void testRenameOnRestore() throws Exception { logger.info("--> restore indices with different names"); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+-2)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) @@ -498,7 +504,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+)") .setRenameReplacement("same-name") .setWaitForCompletion(true) @@ -512,7 +518,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("test-idx-2") .setRenameReplacement("test-idx-1") .setWaitForCompletion(true) @@ -526,7 +532,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1") .setRenamePattern(".+") .setRenameReplacement("__WRONG__") @@ -541,7 +547,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1") .setRenamePattern(".+") .setRenameReplacement("alias-3") @@ -556,7 +562,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() 
.cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1") .setRenamePattern("test-idx") .setRenameReplacement("alias") @@ -571,7 +577,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1", "test-idx-2") .setRenamePattern("test-idx-1") .setRenameReplacement("alias-2") @@ -585,7 +591,7 @@ logger.info("--> try renaming indices into existing alias of itself, but don't restore aliases "); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1") .setRenamePattern("test-idx") .setRenameReplacement("alias") @@ -614,7 +620,7 @@ public void testDynamicRestoreThrottling() throws Exception { updateClusterSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "100b")); ActionFuture<RestoreSnapshotResponse> restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .execute(); @@ -686,7 +692,7 @@ public void testChangeSettingsOnRestore() throws Exception { logger.info("--> try restoring while changing the number of shards - should fail"); ActionRequestBuilder<?, ?> builder1 = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIncorrectIndexSettings) .setWaitForCompletion(true); @@ -699,7 +705,7 @@ .build(); ActionRequestBuilder<?, ?> builder = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIncorrectReplicasIndexSettings) .setWaitForCompletion(true); @@ -708,7 +714,7 @@ logger.info("--> restore index with correct settings from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIndexSettings) .setWaitForCompletion(true) @@ -731,7 +737,7 @@ logger.info("--> restore index with correct settings from the snapshot"); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIgnoreIndexSettings("*") // delete everything we can delete .setIndexSettings(newIndexSettings) .setWaitForCompletion(true) @@ -806,7 +812,7 @@ public void testRecreateBlocksOnRestore() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndexSettings(changedSettings)
.setWaitForCompletion(true) .get(); @@ -858,7 +864,7 @@ public void testForbidDisableSoftDeletesDuringRestore() throws Exception { createSnapshot("test-repo", "snapshot-0", Collections.singletonList("test-index")); final SnapshotRestoreException restoreError = expectThrows( SnapshotRestoreException.class, - clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "snapshot-0") .setIndexSettings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false)) .setRenamePattern("test-index") .setRenameReplacement("new-index") @@ -874,7 +880,7 @@ public void testFailOnAncientVersion() throws Exception { final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion); final SnapshotRestoreException snapshotRestoreException = expectThrows( SnapshotRestoreException.class, - clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, oldSnapshot) ); assertThat( snapshotRestoreException.getMessage(), @@ -903,11 +909,11 @@ public void testNoWarningsOnRestoreOverClosedIndex() throws IllegalAccessExcepti new MockLog.UnseenEventExpectation("no warnings", FileRestoreContext.class.getCanonicalName(), Level.WARN, "*") ); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) - .setIndices(indexName) - .setRestoreGlobalState(false) - .setWaitForCompletion(true) - .get(); + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotName + ).setIndices(indexName).setRestoreGlobalState(false).setWaitForCompletion(true).get(); assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); mockLog.assertAllExpectationsMatched(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index d625b53785d38..a651537c77539 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -157,7 +157,7 @@ public void testBasicWorkFlow() throws Exception { createSnapshot("test-repo", "test-snap", Arrays.asList(indicesToSnapshot)); - List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo") + List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots(randomFrom("test-snap", "_all", "*", "*-snap", "test*")) .get() .getSnapshots(); @@ -189,9 +189,11 @@ public void testBasicWorkFlow() throws Exception { indicesAdmin().prepareClose("test-idx-1", "test-idx-2").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -216,7 +218,7 @@ public void testBasicWorkFlow() throws Exception { logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("-->
restore one index after deletion"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2") .get(); @@ -263,11 +265,11 @@ public void testSingleGetAfterRestore() throws Exception { createRepository(repoName, "fs", absolutePath); createSnapshot(repoName, snapshotName, Collections.singletonList(indexName)); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) - .setWaitForCompletion(true) - .setRenamePattern(indexName) - .setRenameReplacement(restoredIndexName) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotName + ).setWaitForCompletion(true).setRenamePattern(indexName).setRenameReplacement(restoredIndexName).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(client().prepareGet(restoredIndexName, docId).get().isExists(), equalTo(true)); @@ -295,9 +297,11 @@ public void testFreshIndexUUID() { indicesAdmin().prepareClose("test").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -310,7 +314,7 @@ public void testFreshIndexUUID() { ); logger.info("--> restore indices with different names"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) @@ -342,7 +346,7 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("mock") .setSettings( Settings.builder() @@ -360,10 +364,11 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException logger.info("--> snapshot"); try { - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .setIndices("test-idx") - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).setIndices("test-idx").get(); if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) { // If we are here, that means we didn't have any failures, let's check it assertThat(getFailureCount("test-repo"), equalTo(0L)); @@ -408,7 +413,7 @@ public void testDataFileFailureDuringSnapshot() throws Exception { createIndexWithRandomDocs("test-idx", 100); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = 
clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") .get(); @@ -430,7 +435,7 @@ public void testDataFileFailureDuringSnapshot() throws Exception { assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards())); // Verify that snapshot status also contains the same failures - SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .addSnapshots("test-snap") .get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); @@ -486,7 +491,7 @@ public void testDataFileFailureDuringRestore() throws Exception { logger.info("--> restore index after deletion"); final RestoreSnapshotResponse restoreResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); @@ -542,7 +547,7 @@ public void testDataFileCorruptionDuringRestore() throws Exception { logger.info("--> restore corrupt index"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -640,13 +645,13 @@ private void unrestorableUseCase( // update the test repository assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("mock") .setSettings(Settings.builder().put("location", repositoryLocation).put(repositorySettings).build()) ); // attempt to restore the snapshot with the given settings - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices(indexName) .setIndexSettings(restoreIndexSettings) .setWaitForCompletion(true) @@ -685,7 +690,9 @@ private void unrestorableUseCase( // delete the index and restore again assertAcked(indicesAdmin().prepareDelete(indexName)); - restoreResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get(); + restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(true) + .get(); assertThat(restoreResponse.getRestoreInfo().totalShards(), equalTo(numShards.numPrimaries)); assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(numShards.numPrimaries)); @@ -721,7 +728,7 @@ public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Except logger.info("--> restore index after deletion"); ActionFuture restoreSnapshotResponseFuture = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .execute(); @@ -746,7 +753,7 @@ public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Except logger.info("--> trying to restore index again"); 
restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -770,7 +777,9 @@ public void testUnallocatedShards() { logger.info("--> snapshot"); final SnapshotException sne = expectThrows( SnapshotException.class, - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test-idx") ); assertThat(sne.getMessage(), containsString("the following indices have unassigned primary shards")); assertThat(getRepositoryData("test-repo"), is(RepositoryData.EMPTY)); @@ -811,12 +820,13 @@ public void testDeleteSnapshot() throws Exception { if (randomBoolean()) { for (int i = 1; i < numberOfSnapshots - 1; i++) { - client.admin().cluster().prepareDeleteSnapshot("test-repo", new String[] { "test-snap-" + i }).get(); + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", new String[] { "test-snap-" + i }).get(); } } else { client.admin() .cluster() .prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, "test-repo", IntStream.range(1, numberOfSnapshots - 1).mapToObj(i -> "test-snap-" + i).toArray(String[]::new) ) @@ -834,7 +844,7 @@ public void testDeleteSnapshot() throws Exception { String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", lastSnapshot) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", lastSnapshot) .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -880,7 +890,12 @@ public void testMoveShardWhileSnapshotting() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + client.admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); @@ -905,7 +920,7 @@ public void testMoveShardWhileSnapshotting() throws Exception { logger.info("--> restore index"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -937,7 +952,12 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + client.admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); @@ -945,7 +965,10 @@ public void 
testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode); try { - client.admin().cluster().prepareDeleteRepository(randomFrom("test-repo", "test-*", "*")).get(); + client.admin() + .cluster() + .prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, randomFrom("test-repo", "test-*", "*")) + .get(); fail("shouldn't be able to delete in-use repository"); } catch (Exception ex) { logger.info("--> in-use repository deletion failed"); @@ -959,7 +982,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { try { client.admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setVerify(false) .setType("fs") .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))) @@ -973,7 +996,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository("test-repo-2") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2") .setVerify(false) // do not do verification itself as snapshot threads could be fully blocked .setType("fs") .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))) @@ -996,7 +1019,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> restore index"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -1029,7 +1052,7 @@ public void testReadonlyRepository() throws Exception { logger.info("--> restore index after deletion"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("readonly-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "readonly-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") .get(); @@ -1037,12 +1060,15 @@ public void testReadonlyRepository() throws Exception { assertDocCount("test-idx", 100L); logger.info("--> list available shapshots"); - GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("readonly-repo").get(); + GetSnapshotsResponse getSnapshotsResponse = client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "readonly-repo") + .get(); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); logger.info("--> try deleting snapshot"); assertRequestBuilderThrows( - client.admin().cluster().prepareDeleteSnapshot("readonly-repo", "test-snap"), + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "readonly-repo", "test-snap"), RepositoryException.class, "repository is readonly" ); @@ -1051,7 +1077,7 @@ public void testReadonlyRepository() throws Exception { assertRequestBuilderThrows( client.admin() .cluster() - .prepareCreateSnapshot("readonly-repo", "test-snap-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "readonly-repo", "test-snap-2") .setWaitForCompletion(true) .setIndices("test-idx"), RepositoryException.class, @@ -1086,7 +1112,7 @@ public void testSnapshotStatus() throws Exception { logger.info("--> snapshot"); client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + 
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIncludeGlobalState(false) .setIndices("test-idx") @@ -1112,7 +1138,7 @@ public void testSnapshotStatus() throws Exception { }); logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode); - SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").get(); + SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").get(); assertThat(response.getSnapshots().size(), equalTo(1)); SnapshotStatus snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getState(), equalTo(State.STARTED)); @@ -1127,7 +1153,7 @@ public void testSnapshotStatus() throws Exception { } logger.info("--> checking snapshot status for all currently running and snapshot with empty repository"); - response = client.admin().cluster().prepareSnapshotStatus().get(); + response = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getSnapshots().size(), equalTo(1)); snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getState(), equalTo(State.STARTED)); @@ -1142,11 +1168,15 @@ public void testSnapshotStatus() throws Exception { } logger.info("--> checking that _current returns the currently running snapshot"); - GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().get(); + GetSnapshotsResponse getResponse = client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setCurrentSnapshot() + .get(); assertThat(getResponse.getSnapshots().size(), equalTo(1)); SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS)); - snapshotStatus = client.admin().cluster().prepareSnapshotStatus().get().getSnapshots().get(0); + snapshotStatus = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get().getSnapshots().get(0); assertThat(snapshotInfo.totalShards(), equalTo(snapshotStatus.getIndices().get("test-idx").getShardsStats().getTotalShards())); assertThat(snapshotInfo.successfulShards(), equalTo(snapshotStatus.getIndices().get("test-idx").getShardsStats().getDoneShards())); assertThat(snapshotInfo.shardFailures().size(), equalTo(0)); @@ -1159,7 +1189,7 @@ public void testSnapshotStatus() throws Exception { logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size()); logger.info("--> checking snapshot status again after snapshot is done"); - response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").get(); + response = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap").get(); snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getIndices().size(), equalTo(1)); assertThat(snapshotStatus.includeGlobalState(), equalTo(false)); @@ -1172,25 +1202,31 @@ public void testSnapshotStatus() throws Exception { assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards())); logger.info("--> checking snapshot status after it is done with empty repository"); - response = client.admin().cluster().prepareSnapshotStatus().get(); + response = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getSnapshots().size(), equalTo(0)); 
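The status hunks above exercise three read paths, each now threading the same timeout through. As fragments of the hypothetical class from the earlier sketch (imports of SnapshotsStatusResponse and GetSnapshotsResponse from org.elasticsearch.action.admin.cluster.snapshots.status and ...snapshots.get assumed; names illustrative):

```java
// Status of everything currently running: no repository argument at all.
SnapshotsStatusResponse running = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get();

// Status of named snapshots within a single repository.
SnapshotsStatusResponse named = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "sketch-repo")
    .addSnapshots("sketch-snap")
    .get();

// The get API's _current pseudo-name resolves to in-progress snapshots only,
// so it yields an empty list once nothing is running.
GetSnapshotsResponse current = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "sketch-repo")
    .setCurrentSnapshot()
    .get();
```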
logger.info("--> checking that _current no longer returns the snapshot"); assertThat( - client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").get().getSnapshots().isEmpty(), + client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .addSnapshots("_current") + .get() + .getSnapshots() + .isEmpty(), equalTo(true) ); // test that getting an unavailable snapshot status throws an exception if ignoreUnavailable is false on the request SnapshotMissingException ex = expectThrows( SnapshotMissingException.class, - client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist") + client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap-doesnt-exist") ); assertEquals("[test-repo:test-snap-doesnt-exist] is missing", ex.getMessage()); // test that getting an unavailable snapshot status does not throw an exception if ignoreUnavailable is true on the request response = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .addSnapshots("test-snap-doesnt-exist") .setIgnoreUnavailable(true) .get(); @@ -1199,7 +1235,7 @@ public void testSnapshotStatus() throws Exception { // (available one should be returned) response = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .addSnapshots("test-snap", "test-snap-doesnt-exist") .setIgnoreUnavailable(true) .get(); @@ -1229,7 +1265,12 @@ public void testSnapshotRelocatingPrimary() throws Exception { ); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + client.admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); awaitNoMoreRunningOperations(); SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); @@ -1256,7 +1297,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { { SnapshotStatus snapshotStatus = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test") .get() .getSnapshots() @@ -1272,7 +1313,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { { SnapshotStatus snapshotStatus = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-1") .get() .getSnapshots() @@ -1289,7 +1330,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { { SnapshotStatus snapshotStatus = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-2") .get() .getSnapshots() @@ -1320,7 +1361,7 @@ public void testCloseOrDeleteIndexDuringSnapshot() throws Exception { createIndexWithRandomDocs("test-idx-3", 100); logger.info("--> snapshot"); - ActionFuture future = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + ActionFuture future = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-*") .setWaitForCompletion(true) .setPartial(false) @@ -1377,7 +1418,11 @@ public void testCloseIndexDuringRestore() throws Exception { final ActionFuture restoreFut; try { logger.info("--> start restore"); - 
restoreFut = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute(); + restoreFut = client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(true) + .execute(); logger.info("--> waiting for block to kick in"); waitForBlockOnAnyDataNode("test-repo"); @@ -1419,7 +1464,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { assertThat( client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIndices(indexName) .setWaitForCompletion(true) .get() @@ -1431,7 +1476,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { assertThat( client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName2) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName2) .setIndices(indexName) .setWaitForCompletion(true) .get() @@ -1449,7 +1494,11 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { final ActionFuture restoreFut; try { logger.info("--> start restore"); - restoreFut = client.admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(true).execute(); + restoreFut = client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .execute(); logger.info("--> waiting for block to kick in"); waitForBlockOnAnyDataNode(repoName); @@ -1457,7 +1506,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { logger.info("--> try deleting the snapshot while the restore is in progress (should throw an error)"); ConcurrentSnapshotExecutionException e = expectThrows( ConcurrentSnapshotExecutionException.class, - clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName) + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) ); assertEquals(repoName, e.getRepositoryName()); assertEquals(snapshotName, e.getSnapshotName()); @@ -1487,10 +1536,22 @@ public void testSnapshotName() throws Exception { createRepository("test-repo", "fs"); - expectThrows(InvalidSnapshotNameException.class, client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo")); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo")); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo")); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo")); + expectThrows( + InvalidSnapshotNameException.class, + client.admin().cluster().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "_foo") + ); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("_foo") + ); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "_foo") + ); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("_foo") + ); } public void testListCorruptedSnapshot() throws Exception { @@ -1525,7 +1586,7 @@ public void testListCorruptedSnapshot() throws Exception { logger.info("--> get snapshots request should return both snapshots"); List snapshotInfos = client.admin() .cluster() 
- .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setIgnoreUnavailable(true) .get() .getSnapshots(); @@ -1536,7 +1597,7 @@ public void testListCorruptedSnapshot() throws Exception { final SnapshotException ex = expectThrows( SnapshotException.class, - client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false) + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setIgnoreUnavailable(false) ); assertThat(ex.getRepositoryName(), equalTo("test-repo")); assertThat(ex.getSnapshotName(), equalTo("test-snap-2")); @@ -1565,12 +1626,14 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { outChan.truncate(randomInt(10)); } - List snapshotInfos = clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshotName)); - SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus(repoName).setSnapshots(snapshotName).get(); + SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(snapshotName) + .get(); assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo(snapshotName)); @@ -1578,15 +1641,19 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { SnapshotException ex = expectThrows( SnapshotException.class, - clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setRestoreGlobalState(true).setWaitForCompletion(true) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setRestoreGlobalState(true) + .setWaitForCompletion(true) ); assertThat(ex.getRepositoryName(), equalTo(repoName)); assertThat(ex.getSnapshotName(), equalTo(snapshotName)); assertThat(ex.getMessage(), containsString("failed to read global metadata")); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotName + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(snapshotInfo.successfulShards())); @@ -1642,7 +1709,7 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { outChan.truncate(randomInt(10)); } - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); @@ -1651,7 +1718,7 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { Predicate isRestorableIndex = index -> corruptedIndex.getName().equals(index) == false; - 
clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices(nbDocsPerIndex.keySet().stream().filter(isRestorableIndex).toArray(String[]::new)) .setRestoreGlobalState(randomBoolean()) .setWaitForCompletion(true) @@ -1688,7 +1755,7 @@ public void testCannotCreateSnapshotsWithSameName() throws Exception { try { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -1778,7 +1845,7 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("mock") .setVerify(false) .setSettings( @@ -1806,7 +1873,7 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception { try { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") .get(); @@ -1834,7 +1901,7 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("fs") .setVerify(false) .setSettings(Settings.builder().put("location", repoPath)) @@ -1875,18 +1942,27 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { } logger.info("--> verify _all returns snapshot info"); - GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("_all").setVerbose(false).get(); + GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("_all") + .setVerbose(false) + .get(); assertEquals(indicesPerSnapshot.size(), response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); logger.info("--> verify wildcard returns snapshot info"); - response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap-*").setVerbose(false).get(); + response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap-*") + .setVerbose(false) + .get(); assertEquals(indicesPerSnapshot.size(), response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); logger.info("--> verify individual requests return snapshot info"); for (int i = 0; i < numSnapshots; i++) { - response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap-" + i).setVerbose(false).get(); + response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap-" + i) + .setVerbose(false) + .get(); assertEquals(1, response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); } @@ -1927,7 +2003,7 @@ public void testSnapshottingWithMissingSequenceNumbers() throws Exception { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .execute() 
.get(); @@ -1986,7 +2062,7 @@ public void testSnapshotDifferentIndicesBySameName() throws InterruptedException assertThat(snapshot2.successfulShards(), is(newShardCount)); logger.info("--> restoring snapshot 1"); - clusterAdmin().prepareRestoreSnapshot(repoName, "snap-1") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap-1") .setIndices(indexName) .setRenamePattern(indexName) .setRenameReplacement("restored-1") @@ -1994,7 +2070,7 @@ public void testSnapshotDifferentIndicesBySameName() throws InterruptedException .get(); logger.info("--> restoring snapshot 2"); - clusterAdmin().prepareRestoreSnapshot(repoName, "snap-2") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap-2") .setIndices(indexName) .setRenamePattern(indexName) .setRenameReplacement("restored-2") @@ -2019,7 +2095,7 @@ public void testSnapshotDifferentIndicesBySameName() throws InterruptedException } assertAcked(startDeleteSnapshot(repoName, snapshotToDelete).get()); logger.info("--> restoring snapshot [{}]", snapshotToRestore); - clusterAdmin().prepareRestoreSnapshot(repoName, snapshotToRestore) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotToRestore) .setIndices(indexName) .setRenamePattern(indexName) .setRenameReplacement("restored-3") @@ -2048,8 +2124,8 @@ public void testBulkDeleteWithOverlappingPatterns() { } logger.info("--> deleting all snapshots"); - clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snap-*", "*").get(); - final GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots("test-repo").get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-*", "*").get(); + final GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get(); assertThat(getSnapshotsResponse.getSnapshots(), empty()); } @@ -2081,7 +2157,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { List snapshotInfos = client.admin() .cluster() - .prepareGetSnapshots(repoName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) .setSnapshots(randomFrom(snapName, "_all", "*", "*-snap", "test*")) .get() .getSnapshots(); @@ -2095,11 +2171,11 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { // Verify that hidden indices get restored with a wildcard restore { - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapName) - .setWaitForCompletion(true) - .setIndices("*") - .execute() - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapName + ).setWaitForCompletion(true).setIndices("*").execute().get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -2115,11 +2191,11 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { // Verify that exclusions work on hidden indices { - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapName) - .setWaitForCompletion(true) - .setIndices("*", "-.*") - .execute() - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapName + ).setWaitForCompletion(true).setIndices("*", "-.*").execute().get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); 
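Two other patterns recur in the hunks just above: restoring a snapshot under a new index name, and deleting snapshots by overlapping wildcard patterns. Under the new signatures, and again as illustrative fragments of the same hypothetical class:

```java
// Rename on restore, so the restored copy can coexist with the live index.
clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "sketch-repo", "snap-1")
    .setIndices("sketch-idx")
    .setRenamePattern("sketch-idx")
    .setRenameReplacement("restored-1")
    .setWaitForCompletion(true)
    .get();

// Bulk deletion accepts several, possibly overlapping, wildcard patterns at once.
clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "sketch-repo", "snap-*", "*").get();
```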
assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -2135,11 +2211,11 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { // Verify that hidden indices can be restored with a non-star pattern { - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapName) - .setWaitForCompletion(true) - .setIndices("hid*") - .execute() - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapName + ).setWaitForCompletion(true).setIndices("hid*").execute().get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -2155,10 +2231,11 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { // Verify that hidden indices can be restored by fully specified name { - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapName) - .setWaitForCompletion(true) - .setIndices(dottedHiddenIndex) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapName + ).setWaitForCompletion(true).setIndices(dottedHiddenIndex).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java index c31eafa8444ad..6c91db0ad7228 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java @@ -70,7 +70,11 @@ public void testExceptionWhenRestoringPersistentSettings() { logger.info("--> restore snapshot"); final IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true) + client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) ); assertEquals(BrokenSettingPlugin.EXCEPTION.getMessage(), ex.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java index 05888fd776641..b0c5e73de5859 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java @@ -107,15 +107,15 @@ public void testIncludeGlobalState() throws Exception { } logger.info("--> snapshot without global state"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-no-global-state") - .setIndices() - .setIncludeGlobalState(false) - .setWaitForCompletion(true) - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-no-global-state" + ).setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get(); 
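The testIncludeGlobalState hunks around this point toggle cluster-state capture independently on the snapshot and restore sides. The shape of the calls, as one more illustrative fragment (CreateSnapshotResponse import assumed):

```java
// Global cluster state (templates, persistent settings and similar cluster-level
// metadata) is captured only on request; with an empty setIndices() the snapshot
// carries state and zero shards.
CreateSnapshotResponse stateOnly = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "sketch-repo", "snap-global")
    .setIndices()
    .setIncludeGlobalState(true)
    .setWaitForCompletion(true)
    .get();
assertEquals(0, stateOnly.getSnapshotInfo().totalShards());

// Restoring that state is a second, independent opt-in.
clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "sketch-repo", "snap-global")
    .setWaitForCompletion(true)
    .setRestoreGlobalState(true)
    .get();
```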
assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(getSnapshot("test-repo", "test-snap-no-global-state").state(), equalTo(SnapshotState.SUCCESS)); - SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .addSnapshots("test-snap-no-global-state") .get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); @@ -123,7 +123,7 @@ public void testIncludeGlobalState() throws Exception { assertThat(snapshotStatus.includeGlobalState(), equalTo(false)); logger.info("--> snapshot with global state"); - createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-with-global-state") + createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-with-global-state") .setIndices() .setIncludeGlobalState(true) .setWaitForCompletion(true) @@ -131,7 +131,9 @@ public void testIncludeGlobalState() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(getSnapshot("test-repo", "test-snap-with-global-state").state(), equalTo(SnapshotState.SUCCESS)); - snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-with-global-state").get(); + snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") + .addSnapshots("test-snap-with-global-state") + .get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0); assertThat(snapshotStatus.includeGlobalState(), equalTo(true)); @@ -154,10 +156,11 @@ public void testIncludeGlobalState() throws Exception { } logger.info("--> try restoring from snapshot without global state"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state") - .setWaitForCompletion(true) - .setRestoreGlobalState(false) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-no-global-state" + ).setWaitForCompletion(true).setRestoreGlobalState(false).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template wasn't restored"); @@ -165,7 +168,7 @@ public void testIncludeGlobalState() throws Exception { assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-with-global-state") .setWaitForCompletion(true) .setRestoreGlobalState(true) .get(); @@ -192,11 +195,11 @@ public void testIncludeGlobalState() throws Exception { createIndexWithRandomDocs("test-idx", 100); logger.info("--> snapshot without global state but with indices"); - createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index") - .setIndices("test-idx") - .setIncludeGlobalState(false) - 
.setWaitForCompletion(true) - .get(); + createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-no-global-state-with-index" + ).setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0)); assertThat( createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -221,10 +224,11 @@ public void testIncludeGlobalState() throws Exception { assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> try restoring index and cluster state from snapshot without global state"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index") - .setWaitForCompletion(true) - .setRestoreGlobalState(false) - .get(); + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-no-global-state-with-index" + ).setWaitForCompletion(true).setRestoreGlobalState(false).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java index b2494c5bd2b91..773be25a4ca9d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java @@ -48,7 +48,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-index"); dataNodeClient().admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-index") .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index a5db88820f8d8..7f90b57204fc8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -222,7 +222,7 @@ public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception { // and succeeds final var snapshots = safeAwait( SubscribableListener.newForked( - l -> client().admin().cluster().getSnapshots(new GetSnapshotsRequest(repoName), l) + l -> client().admin().cluster().getSnapshots(new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, repoName), l) ) ).getSnapshots(); assertThat(snapshots, hasSize(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index c3da91bde254d..600a3953d9bda 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -85,13 +85,13 @@ public void testStatusApiConsistency() throws Exception { createFullSnapshot("test-repo", "test-snap"); - List snapshotInfos = 
clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); - final List snapshotStatus = clusterAdmin().prepareSnapshotStatus("test-repo") + final List snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots(); @@ -121,7 +121,12 @@ public void testStatusAPICallInProgressSnapshot() throws Exception { awaitNumberOfSnapshotsInProgress(1); assertEquals( SnapshotsInProgress.State.STARTED, - clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).getState() + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap") + .get() + .getSnapshots() + .get(0) + .getState() ); logger.info("--> unblock all data nodes"); @@ -140,7 +145,10 @@ public void testExceptionOnMissingSnapBlob() throws IOException { logger.info("--> delete snap-${uuid}.dat file for this snapshot to simulate concurrent delete"); IOUtils.rm(repoPath.resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat")); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap")); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("test-snap") + ); } public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { @@ -169,7 +177,10 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { .resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat") ); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap")); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("test-snap") + ); } public void testGetSnapshotsWithoutIndices() throws Exception { @@ -177,12 +188,18 @@ public void testGetSnapshotsWithoutIndices() throws Exception { logger.info("--> snapshot"); final SnapshotInfo snapshotInfo = assertSuccessful( - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setIndices() + .setWaitForCompletion(true) + .execute() ); assertThat(snapshotInfo.totalShards(), is(0)); logger.info("--> verify that snapshot without index shows up in non-verbose listing"); - final List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").setVerbose(false).get().getSnapshots(); + final List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setVerbose(false) + .get() + .getSnapshots(); assertThat(snapshotInfos, hasSize(1)); final SnapshotInfo found = snapshotInfos.get(0); assertThat(found.snapshotId(), is(snapshotInfo.snapshotId())); @@ -221,7 +238,7 @@ public void testCorrectCountsForDoneShards() throws Exception { final ActionFuture responseSnapshotOne = internalCluster().masterClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotOne) + 
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotOne) .setWaitForCompletion(true) .execute(); @@ -256,9 +273,11 @@ public void testCorrectCountsForDoneShards() throws Exception { blockDataNode(repoName, dataNodeTwo); final String snapshotTwo = "snap-2"; - final ActionFuture responseSnapshotTwo = clusterAdmin().prepareCreateSnapshot(repoName, snapshotTwo) - .setWaitForCompletion(true) - .execute(); + final ActionFuture responseSnapshotTwo = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotTwo + ).setWaitForCompletion(true).execute(); waitForBlock(dataNodeTwo, repoName); @@ -292,7 +311,7 @@ public void testCorrectCountsForDoneShards() throws Exception { public void testGetSnapshotsNoRepos() { ensureGreen(); - GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(new String[] { "_all" }) + GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, new String[] { "_all" }) .setSnapshots(randomFrom("_all", "*")) .get(); @@ -322,7 +341,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType("fs") .setSettings(Settings.builder().put("location", repoPath).build()) ); @@ -340,7 +359,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { logger.info("--> create snapshot with index {} and name {} in repository {}", snapshotIndex, snapshotName, repoName); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -358,7 +377,10 @@ public void testGetSnapshotsMultipleRepos() throws Exception { logger.info("--> get and verify snapshots"); GetSnapshotsResponse getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots(randomFrom(new String[] { "_all" }, new String[] { "repo*" }, repoList.toArray(new String[0]))) + .prepareGetSnapshots( + TEST_REQUEST_TIMEOUT, + randomFrom(new String[] { "_all" }, new String[] { "repo*" }, repoList.toArray(new String[0])) + ) .setSnapshots(randomFrom("_all", "*")) .get(); @@ -375,7 +397,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { logger.info("--> specify all snapshot names with ignoreUnavailable=false"); GetSnapshotsResponse getSnapshotsResponse2 = client.admin() .cluster() - .prepareGetSnapshots(randomFrom("_all", "repo*")) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, randomFrom("_all", "repo*")) .setIgnoreUnavailable(false) .setSnapshots(snapshotList.toArray(new String[0])) .get(); @@ -387,7 +409,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { logger.info("--> specify all snapshot names with ignoreUnavailable=true"); GetSnapshotsResponse getSnapshotsResponse3 = client.admin() .cluster() - .prepareGetSnapshots(randomFrom("_all", "repo*")) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, randomFrom("_all", "repo*")) .setIgnoreUnavailable(true) .setSnapshots(snapshotList.toArray(new String[0])) .get(); @@ -432,7 +454,7 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { return successShards == shards.size() - 1 && initShards == 1; }); - GetSnapshotsResponse response1 = clusterAdmin().prepareGetSnapshots("test-repo") + GetSnapshotsResponse response1 = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, 
"test-repo") .setSnapshots("test-snap") .setIgnoreUnavailable(true) .get(); @@ -441,13 +463,13 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { SnapshotInfo snapshotInfo = snapshotInfoList.get(0); assertEquals(SnapshotState.IN_PROGRESS, snapshotInfo.state()); - SnapshotStatus snapshotStatus = clusterAdmin().prepareSnapshotStatus().get().getSnapshots().get(0); + SnapshotStatus snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get().getSnapshots().get(0); assertThat(snapshotInfo.totalShards(), equalTo(snapshotStatus.getIndices().get(indexName).getShardsStats().getTotalShards())); assertThat(snapshotInfo.successfulShards(), equalTo(snapshotStatus.getIndices().get(indexName).getShardsStats().getDoneShards())); assertThat(snapshotInfo.shardFailures().size(), equalTo(0)); String notExistedSnapshotName = "snapshot_not_exist"; - GetSnapshotsResponse response2 = clusterAdmin().prepareGetSnapshots("test-repo") + GetSnapshotsResponse response2 = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots(notExistedSnapshotName) .setIgnoreUnavailable(true) .get(); @@ -455,7 +477,9 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { expectThrows( SnapshotMissingException.class, - clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(notExistedSnapshotName).setIgnoreUnavailable(false) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots(notExistedSnapshotName) + .setIgnoreUnavailable(false) ); logger.info("--> unblock all data nodes"); @@ -475,7 +499,9 @@ public void testSnapshotStatusOnFailedSnapshot() throws Exception { ensureGreen(); indexRandomDocs("test-idx-good", randomIntBetween(1, 5)); - final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(repoName).setSnapshots(snapshot).get(); + final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(snapshot) + .get(); assertEquals(1, snapshotsStatusResponse.getSnapshots().size()); assertEquals(SnapshotsInProgress.State.FAILED, snapshotsStatusResponse.getSnapshots().get(0).getState()); } @@ -498,12 +524,12 @@ public void testGetSnapshotsRequest() throws Exception { logger.info("--> get snapshots on an empty repository"); expectThrows( SnapshotMissingException.class, - client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot") + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName).addSnapshots("non-existent-snapshot") ); // with ignore unavailable set to true, should not throw an exception GetSnapshotsResponse getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots(repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) .setIgnoreUnavailable(true) .addSnapshots("non-existent-snapshot") .get(); @@ -521,14 +547,14 @@ public void testGetSnapshotsRequest() throws Exception { final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName); client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, "snap-on-empty-repo") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, "snap-on-empty-repo") .setWaitForCompletion(false) .setIndices(indexName) .get(); waitForBlock(initialBlockedNode, repositoryName); // wait for block to kick in getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, 
"test-repo") .setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo")) .get(); assertEquals(1, getSnapshotsResponse.getSnapshots().size()); @@ -543,7 +569,7 @@ public void testGetSnapshotsRequest() throws Exception { final String snapshotName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -563,7 +589,7 @@ public void testGetSnapshotsRequest() throws Exception { final String blockedNode = blockNodeWithIndex(repositoryName, indexName); client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, inProgressSnapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, inProgressSnapshot) .setWaitForCompletion(false) .setIndices(indexName) .get(); @@ -583,7 +609,7 @@ public void testGetSnapshotsRequest() throws Exception { } getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots(repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) .setSnapshots(snapshotsToGet.toArray(Strings.EMPTY_ARRAY)) .get(); List sortedNames = Arrays.asList(snapshotNames); @@ -591,7 +617,11 @@ public void testGetSnapshotsRequest() throws Exception { assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots)); assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().toList(), equalTo(sortedNames)); - getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots(snapshotNames).get(); + getSnapshotsResponse = client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) + .addSnapshots(snapshotNames) + .get(); sortedNames = Arrays.asList(snapshotNames); Collections.sort(sortedNames); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots)); @@ -604,7 +634,7 @@ public void testGetSnapshotsRequest() throws Exception { final String secondRegex = "*" + regexName.substring(splitPos); getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots(repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) .addSnapshots(snapshotNames) .addSnapshots(firstRegex, secondRegex) .get(); @@ -644,7 +674,7 @@ public void testConcurrentCreateAndStatusAPICalls() throws Exception { createsListener.map(ignored -> null) ); for (final var snapshotName : snapshotNames) { - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(waitForCompletion) .execute(createsGroupedListener); } @@ -655,16 +685,24 @@ public void testConcurrentCreateAndStatusAPICalls() throws Exception { .info(ThreadPool.Names.SNAPSHOT_META) .getMax(); for (int i = 0; i < metaThreadPoolSize * 2; i++) { - statuses.add(dataNodeClient.admin().cluster().prepareSnapshotStatus(repoName).setSnapshots(snapshotNames).execute()); - gets.add(dataNodeClient.admin().cluster().prepareGetSnapshots(repoName).setSnapshots(snapshotNames).execute()); + statuses.add( + dataNodeClient.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute() + ); + gets.add( + dataNodeClient.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute() + ); } // ... 
and then some more status requests until all snapshots are done var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); assertBusy(() -> { final var stillRunning = SnapshotsInProgress.get(masterClusterService.state()).isEmpty() == false; - statuses.add(dataNodeClient.admin().cluster().prepareSnapshotStatus(repoName).setSnapshots(snapshotNames).execute()); - gets.add(dataNodeClient.admin().cluster().prepareGetSnapshots(repoName).setSnapshots(snapshotNames).execute()); + statuses.add( + dataNodeClient.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute() + ); + gets.add( + dataNodeClient.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute() + ); assertFalse(stillRunning); }, 60, TimeUnit.SECONDS); @@ -696,14 +734,16 @@ public void testInfiniteTimeout() throws Exception { indexRandomDocs("test-idx", 10); ensureGreen(); blockAllDataNodes("test-repo"); - final ActionFuture snapshotResponseFuture = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshotResponseFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).execute(); try { waitForBlockOnAnyDataNode("test-repo"); // Make sure that the create-snapshot task completes on master assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); - final List snapshotStatus = clusterAdmin().prepareSnapshotStatus("test-repo") + final List snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setMasterNodeTimeout(TimeValue.MINUS_ONE) .get() .getSnapshots(); @@ -724,7 +764,11 @@ private static SnapshotIndexShardStatus stateFirstShard(SnapshotStatus snapshotS private static SnapshotStatus getSnapshotStatus(String repoName, String snapshotName) { try { - return clusterAdmin().prepareSnapshotStatus(repoName).setSnapshots(snapshotName).get().getSnapshots().get(0); + return clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0); } catch (SnapshotMissingException e) { throw new AssertionError(e); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index b759993be26df..9bcddd5c58d66 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -579,6 +579,7 @@ && randomBoolean() closeIndicesStep.addListener(mustSucceed(ignored1 -> deleteIndicesStep.addListener(mustSucceed(ignored2 -> { final RestoreSnapshotRequestBuilder restoreSnapshotRequestBuilder = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -711,7 +712,12 @@ private void startCloner() { client.admin() .cluster() - .prepareCloneSnapshot(trackedSnapshot.trackedRepository.repositoryName, trackedSnapshot.snapshotName, cloneName) + .prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, + trackedSnapshot.trackedRepository.repositoryName, + trackedSnapshot.snapshotName, + cloneName + ) .setIndices(indexNames.toArray(new String[0])) .execute(mustSucceed(acknowledgedResponse -> { 
Releasables.close(releaseAll); @@ -759,7 +765,7 @@ private void startSnapshotDeleter() { client.admin() .cluster() - .prepareDeleteSnapshot(targetRepository.repositoryName, snapshotNames.toArray(new String[0])) + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, targetRepository.repositoryName, snapshotNames.toArray(new String[0])) .execute(mustSucceed(acknowledgedResponse -> { assertTrue(acknowledgedResponse.isAcknowledged()); for (String snapshotName : snapshotNames) { @@ -828,7 +834,7 @@ private void startCleaner() { logger.info("--> starting cleanup of [{}]", trackedRepository.repositoryName); client.admin() .cluster() - .prepareCleanupRepository(trackedRepository.repositoryName) + .prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName) .execute(mustSucceed(cleanupRepositoryResponse -> { final RepositoryCleanupResult result = cleanupRepositoryResponse.result(); if (result.bytes() > 0L || result.blobs() > 0L) { @@ -838,7 +844,7 @@ private void startCleaner() { // concurrent operations on the repository. client.admin() .cluster() - .prepareCleanupRepository(trackedRepository.repositoryName) + .prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName) .execute(mustSucceed(secondCleanupRepositoryResponse -> { final RepositoryCleanupResult secondCleanupResult = secondCleanupRepositoryResponse.result(); if (secondCleanupResult.blobs() == 1) { @@ -933,6 +939,7 @@ private void startSnapshotter() { ); final CreateSnapshotRequestBuilder createSnapshotRequestBuilder = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName, snapshotName ); @@ -1026,6 +1033,7 @@ private void startPartialSnapshotter() { ); final CreateSnapshotRequestBuilder createSnapshotRequestBuilder = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName, snapshotName ).setPartial(true); @@ -1046,7 +1054,7 @@ private void startPartialSnapshotter() { final DeleteSnapshotRequestBuilder deleteSnapshotRequestBuilder = abortClient.admin() .cluster() - .prepareDeleteSnapshot(trackedRepository.repositoryName, snapshotName); + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName, snapshotName); final Releasable abortReleasable = abortReleasables.transfer(); @@ -1119,7 +1127,7 @@ private void pollForSnapshotCompletion( mustSucceed( () -> client.admin() .cluster() - .prepareGetSnapshots(repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) .setCurrentSnapshot() .execute(mustSucceed(getSnapshotsResponse -> { if (getSnapshotsResponse.getSnapshots() @@ -1365,7 +1373,7 @@ private void putRepositoryAndContinue(Client client, boolean nodeMightRestart, R logger.info("--> put repo [{}]", repositoryName); client.admin() .cluster() - .preparePutRepository(repositoryName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put(FsRepository.LOCATION_SETTING.getKey(), location)) .setVerify(nodeMightRestart == false) @@ -1402,11 +1410,12 @@ private void scheduleRemoveAndAdd() { final Releasable releaseAll = localReleasables.transfer(); logger.info("--> delete repo [{}]", repositoryName); - clusterAdmin().prepareDeleteRepository(repositoryName).execute(mustSucceed(acknowledgedResponse -> { - assertTrue(acknowledgedResponse.isAcknowledged()); - logger.info("--> finished delete repo [{}]", repositoryName); - putRepositoryAndContinue(client, 
nodeMightRestart, releaseAll); - })); + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) + .execute(mustSucceed(acknowledgedResponse -> { + assertTrue(acknowledgedResponse.isAcknowledged()); + logger.info("--> finished delete repo [{}]", repositoryName); + putRepositoryAndContinue(client, nodeMightRestart, releaseAll); + })); replacingRepo = true; } finally { @@ -1708,7 +1717,7 @@ void getSnapshotInfo(Client client, ActionListener listener) { ); client.admin() .cluster() - .prepareGetSnapshots(trackedRepository.repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName) .setSnapshots(snapshotName) .execute(mustSucceed(getSnapshotsResponse -> { assertThat(getSnapshotsResponse.getSnapshots(), hasSize(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java index 4d2d310955a3d..c1b3203b15666 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java @@ -48,12 +48,11 @@ private Tuple testThrottledRepository(String maxSnapshotBytesPerSec, .put("max_restore_bytes_per_sec", maxRestoreBytesPerSec) ); createSnapshot("test-repo", "test-snap", Collections.singletonList("test-idx")); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setRenamePattern("test-") - .setRenameReplacement("test2-") - .setWaitForCompletion(true) - .execute() - .actionGet(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setRenamePattern("test-").setRenameReplacement("test2-").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 50L); long snapshotPause = 0L; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java index 854d5f39ddaad..6bdb70bfba3e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java @@ -118,7 +118,7 @@ public void testNoDoubleFinalization() throws Exception { .equals(Set.of(SnapshotsInProgress.ShardState.QUEUED, SnapshotsInProgress.ShardState.MISSING)) ); }); - clusterAdmin().prepareCreateSnapshot(repoName, "snap-2") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap-2") .setIndices("index-2", "index-3") .setPartial(true) .setWaitForCompletion(false) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java index e68a60201931a..ca9fdc1284e83 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java @@ -124,7 +124,9 @@ public void testDeleteSnapshotWhenNotWaitingForCompletion() throws Exception { SubscribableListener snapshotDeletionListener = 
createSnapshotDeletionListener("test-repo"); repository.blockOnDataFiles(); try { - clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snapshot").setWaitForCompletion(false).execute(listener); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snapshot") + .setWaitForCompletion(false) + .execute(listener); // The request will complete as soon as the deletion is scheduled safeGet(listener); // The deletion won't complete until the block is removed @@ -144,7 +146,9 @@ public void testDeleteSnapshotWhenWaitingForCompletion() throws Exception { SubscribableListener snapshotDeletionListener = createSnapshotDeletionListener("test-repo"); repository.blockOnDataFiles(); try { - clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snapshot").setWaitForCompletion(true).execute(requestCompleteListener); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snapshot") + .setWaitForCompletion(true) + .execute(requestCompleteListener); // Neither the request nor the deletion will complete until we remove the block assertFalse(requestCompleteListener.isDone()); assertFalse(snapshotDeletionListener.isDone()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index 058d5af7d9c85..706ceaad7905c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -83,10 +83,11 @@ public void testRestoreSystemIndicesAsGlobalState() { assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); // restore snapshot with global state, without closing the system index - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify only the original document is restored @@ -102,15 +103,15 @@ public void testSnapshotWithoutGlobalState() { indexDoc("not-a-system-index", "1", "purpose", "non system index doc"); // run a snapshot without global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(false) .get(); assertSnapshotSuccess(createSnapshotResponse); // check snapshot info for which indices were snapshotted - clusterAdmin().prepareGetRepositories(REPO_NAME).get(); - Set<String> snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) + clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, REPO_NAME).get(); + Set<String> snapshottedIndices = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME) .get() .getSnapshots() .stream() @@ -132,7 +133,7 @@ public void testSnapshotByFeature() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot by feature - CreateSnapshotResponse createSnapshotResponse =
clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setIncludeGlobalState(true) .setWaitForCompletion(true) .setFeatureStates(SystemIndexTestPlugin.class.getSimpleName(), AnotherSystemIndexTestPlugin.class.getSimpleName()) @@ -148,10 +149,11 @@ public void testSnapshotByFeature() { assertThat(getDocCount(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); // restore indices as global state without closing the index - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify only the original document is restored @@ -175,7 +177,7 @@ public void testDefaultRestoreOnlyRegularIndices() { refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -184,7 +186,7 @@ public void testDefaultRestoreOnlyRegularIndices() { // Delete the regular index so we can restore it assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -207,7 +209,7 @@ public void testRestoreByFeature() { refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME, AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -225,10 +227,11 @@ public void testRestoreByFeature() { assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); // restore indices by feature - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setFeatureStates("SystemIndexTestPlugin") - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setFeatureStates("SystemIndexTestPlugin").get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify that the restored system index has only one document @@ -253,14 +256,14 @@ public void testSnapshotAndRestoreAssociatedIndices() { refresh(regularIndex, AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME, 
AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME); // snapshot - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setFeatureStates(AssociatedIndicesTestPlugin.class.getSimpleName()) .setWaitForCompletion(true) .get(); assertSnapshotSuccess(createSnapshotResponse); // verify the correctness of the snapshot - Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) + Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME) .get() .getSnapshots() .stream() @@ -282,7 +285,11 @@ public void testSnapshotAndRestoreAssociatedIndices() { assertAcked(indicesAdmin().prepareDelete(AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME).get()); // restore the feature state and its associated index - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ) .setIndices(AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME) .setWaitForCompletion(true) .setFeatureStates(AssociatedIndicesTestPlugin.class.getSimpleName()) @@ -303,7 +310,7 @@ public void testRestoreFeatureNotInSnapshot() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -312,7 +319,7 @@ public void testRestoreFeatureNotInSnapshot() { final String fakeFeatureStateName = "NonExistentTestPlugin"; SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setFeatureStates("SystemIndexTestPlugin", fakeFeatureStateName) ); @@ -331,7 +338,7 @@ public void testSnapshottingSystemIndexByNameIsRejected() throws Exception { IllegalArgumentException error = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) .setWaitForCompletion(true) .setIncludeGlobalState(randomBoolean()) @@ -345,7 +352,7 @@ public void testSnapshottingSystemIndexByNameIsRejected() throws Exception { ); // And create a successful snapshot so we don't upset the test framework - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -362,7 +369,7 @@ public void testRestoringSystemIndexByNameIsRejected() throws IllegalAccessExcep refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = 
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -374,7 +381,7 @@ public void testRestoringSystemIndexByNameIsRejected() throws IllegalAccessExcep IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) ); @@ -398,7 +405,7 @@ public void testSystemIndicesCannotBeRenamed() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -407,7 +414,7 @@ assertAcked(indicesAdmin().prepareDelete(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, nonSystemIndex).get()); // Restore using a rename pattern that matches both the regular and the system index - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(true) .setRenamePattern(".test-(.+)") @@ -433,7 +440,7 @@ public void testRestoreSystemIndicesAsGlobalStateWithDefaultFeatureStateList() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -446,10 +453,11 @@ assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); // restore indices as global state with a null list of feature states - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify that the system index is destroyed @@ -468,7 +476,7 @@ public void testRestoreSystemIndicesAsGlobalStateWithNoFeatureStates() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, regularIndex); // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -482,11 +490,11 @@ assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); // restore with global state and all indices but explicitly no feature states.
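
The same mechanical migration recurs throughout these test files: the master-node timeout, previously filled in via the TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT fallback, becomes an explicit leading argument on each snapshot and repository request builder, and nothing else about the call site changes. A minimal before/after sketch of the pattern, using placeholder repository and snapshot names rather than ones from this diff:

// before: the builder silently fell back to the implicit default master-node timeout
RestoreSnapshotResponse before = clusterAdmin().prepareRestoreSnapshot("my-repo", "my-snap")
    .setWaitForCompletion(true)
    .get();

// after: the caller states the timeout up front; in these tests it is always TEST_REQUEST_TIMEOUT
RestoreSnapshotResponse after = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "my-repo", "my-snap")
    .setWaitForCompletion(true)
    .get();
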
- RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .setFeatureStates(new String[] { randomFrom("none", "NONE") }) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).setFeatureStates(new String[] { randomFrom("none", "NONE") }).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify that the system index still has the updated document, i.e. has not been restored @@ -516,7 +524,7 @@ public void testAllSystemIndicesAreRemovedWhenThatFeatureStateIsRestored() { indexDoc(regularIndex, "1", "purpose", "pre-snapshot doc"); // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -535,11 +543,11 @@ public void testAllSystemIndicesAreRemovedWhenThatFeatureStateIsRestored() { assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); // restore the snapshot - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setFeatureStates("SystemIndexTestPlugin") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setFeatureStates("SystemIndexTestPlugin").setWaitForCompletion(true).setRestoreGlobalState(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // The index we created after the snapshot should be gone @@ -567,7 +575,7 @@ public void testSystemIndexAliasesAreAlwaysRestored() { assertAcked(indicesAdmin().prepareAliases().addAlias(systemIndexName, systemIndexAlias).addAlias(regularIndex, regularAlias).get()); // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -577,12 +585,11 @@ public void testSystemIndexAliasesAreAlwaysRestored() { assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex, systemIndexName)); // Now restore the snapshot with no aliases - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setFeatureStates("SystemIndexTestPlugin") - .setWaitForCompletion(true) - .setRestoreGlobalState(false) - .setIncludeAliases(false) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setFeatureStates("SystemIndexTestPlugin").setWaitForCompletion(true).setRestoreGlobalState(false).setIncludeAliases(false).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // The regular index should exist @@ -608,7 +615,7 @@ public void testNoneFeatureStateMustBeAlone() { // run a snapshot including 
global state IllegalArgumentException createEx = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(randomBoolean()) .setFeatureStates("SystemIndexTestPlugin", "none", "AnotherSystemIndexTestPlugin") @@ -622,7 +629,7 @@ public void testNoneFeatureStateMustBeAlone() { ); // create a successful snapshot with global state/all features - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -630,7 +637,7 @@ public void testNoneFeatureStateMustBeAlone() { SnapshotRestoreException restoreEx = expectThrows( SnapshotRestoreException.class, - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(randomBoolean()) .setFeatureStates("SystemIndexTestPlugin", "none") @@ -659,7 +666,7 @@ public void testNoneFeatureStateOnCreation() { indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc"); refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .setFeatureStates(randomFrom("none", "NONE")) @@ -667,7 +674,7 @@ public void testNoneFeatureStateOnCreation() { assertSnapshotSuccess(createSnapshotResponse); // Verify that the system index was not included - Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) + Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME) .get() .getSnapshots() .stream() @@ -704,18 +711,18 @@ public void testPartialSnapshotsOfSystemIndexRemovesFeatureState() throws Except // Start a snapshot and wait for it to hit the block, then kill the master to force a failover final String partialSnapName = "test-partial-snap"; - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, partialSnapName) - .setIncludeGlobalState(true) - .setWaitForCompletion(false) - .setPartial(true) - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + partialSnapName + ).setIncludeGlobalState(true).setWaitForCompletion(false).setPartial(true).get(); assertThat(createSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); waitForBlock(internalCluster().getMasterName(), REPO_NAME); internalCluster().stopCurrentMasterNode(); // Now get the snapshot and do our checks assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO_NAME) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME) .setSnapshots(partialSnapName) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -761,11 +768,11 @@ public void testParallelIndexDeleteRemovesFeatureState() throws Exception { // Start a snapshot - 
need to do this async because some blocks will block this call logger.info("--> Blocked repo, starting snapshot..."); final String partialSnapName = "test-partial-snap"; - ActionFuture createSnapshotFuture = clusterAdmin().prepareCreateSnapshot(REPO_NAME, partialSnapName) - .setIncludeGlobalState(true) - .setWaitForCompletion(true) - .setPartial(true) - .execute(); + ActionFuture createSnapshotFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + partialSnapName + ).setIncludeGlobalState(true).setWaitForCompletion(true).setPartial(true).execute(); logger.info("--> Started snapshot, waiting for block..."); waitForBlock(dataNodes.get(1), REPO_NAME); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index ec02b8a45cd42..0f9c77e810924 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -196,6 +196,7 @@ static TransportVersion def(int id) { public static final TransportVersion VERSION_SUPPORTING_SPARSE_VECTOR_STATS = def(8_687_00_0); public static final TransportVersion ML_AD_OUTPUT_MEMORY_ALLOCATOR_FIELD = def(8_688_00_0); public static final TransportVersion FAILURE_STORE_LAZY_CREATION = def(8_689_00_0); + public static final TransportVersion SNAPSHOT_REQUEST_TIMEOUTS = def(8_690_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index d0a71d8a94f58..e4615b28af749 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -7,10 +7,12 @@ */ package org.elasticsearch.action.admin.cluster.repositories.cleanup; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -20,18 +22,34 @@ public class CleanupRepositoryRequest extends AcknowledgedRequest { - public CleanupRepositoryRequestBuilder(ElasticsearchClient client, String repository) { - super(client, TransportCleanupRepositoryAction.TYPE, new CleanupRepositoryRequest(repository)); + public CleanupRepositoryRequestBuilder( + ElasticsearchClient client, + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String repository + ) { + super(client, TransportCleanupRepositoryAction.TYPE, new CleanupRepositoryRequest(masterNodeTimeout, ackTimeout, repository)); } public CleanupRepositoryRequestBuilder setName(String repository) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index 4892efaf5ae1f..237e241c8900f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -80,7 +80,7 @@ public TransportCleanupRepositoryAction( clusterService, threadPool, actionFilters, - CleanupRepositoryRequest::new, + CleanupRepositoryRequest::readFrom, indexNameExpressionResolver, CleanupRepositoryResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 17543ba44ae14..cf2317fc143e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -31,17 +32,13 @@ public DeleteRepositoryRequest(StreamInput in) throws IOException { name = in.readString(); } - public DeleteRepositoryRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); - } - /** * Constructs a new unregister repository request with the provided name. * * @param name name of the repository */ - public DeleteRepositoryRequest(String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public DeleteRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(masterNodeTimeout, ackTimeout); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index 6accb02418df8..22983504e33a2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; /** * Builder for unregister repository request @@ -23,8 +24,8 @@ public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< /** * Constructs unregister repository request builder with specified repository name */ - public DeleteRepositoryRequestBuilder(ElasticsearchClient client, String name) { - super(client, TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(name)); + public DeleteRepositoryRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(client, TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(masterNodeTimeout, ackTimeout, name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index 6d7a51420a34b..c8670c40f6aed 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -10,9 +10,9 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadRequest; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -23,11 +23,7 @@ */ public class GetRepositoriesRequest extends MasterNodeReadRequest { - private String[] repositories = Strings.EMPTY_ARRAY; - - public GetRepositoriesRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); - } + private String[] repositories; /** * Constructs a new get repositories request with a list of repositories. @@ -35,10 +31,10 @@ public GetRepositoriesRequest() { * If the list of repositories is empty or it contains a single element "_all", all registered repositories * are returned. * - * @param repositories list of repositories + * @param repositories list of repositories */ - public GetRepositoriesRequest(String[] repositories) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetRepositoriesRequest(TimeValue masterNodeTimeout, String[] repositories) { + super(masterNodeTimeout); this.repositories = repositories; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index f9d0c429c2ee8..04a7655a60e4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.core.TimeValue; /** * Get repository request builder @@ -23,8 +24,8 @@ public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationReques /** * Creates new get repository request builder */ - public GetRepositoriesRequestBuilder(ElasticsearchClient client, String... repositories) { - super(client, GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(repositories)); + public GetRepositoriesRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, String... 
repositories) { + super(client, GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(masterNodeTimeout, repositories)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 9cee77969eb9a..cda1df3dc1c2c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -47,15 +48,15 @@ public PutRepositoryRequest(StreamInput in) throws IOException { verify = in.readBoolean(); } - public PutRepositoryRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public PutRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } /** * Constructs a new put repository request with the provided name. */ - public PutRepositoryRequest(String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public PutRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + this(masterNodeTimeout, ackTimeout); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 86ed38c2ddad9..78ad0dbdfa999 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.XContentType; import java.util.Map; @@ -27,8 +28,8 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< /** * Constructs register repository request for the repository with a given name */ - public PutRepositoryRequestBuilder(ElasticsearchClient client, String name) { - super(client, TransportPutRepositoryAction.TYPE, new PutRepositoryRequest(name)); + public PutRepositoryRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(client, TransportPutRepositoryAction.TYPE, new PutRepositoryRequest(masterNodeTimeout, ackTimeout, name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/reservedstate/ReservedRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/reservedstate/ReservedRepositoryAction.java index 59f254cf3636a..d3dc7c916f066 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/reservedstate/ReservedRepositoryAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/reservedstate/ReservedRepositoryAction.java @@ -82,7 +82,7 @@ public TransformState transform(Object source, TransformState prevState) throws toDelete.removeAll(entities); for (var repositoryToDelete : toDelete) { - var task = new RepositoriesService.UnregisterRepositoryTask(repositoryToDelete); + var task = new RepositoriesService.UnregisterRepositoryTask(DUMMY_TIMEOUT, repositoryToDelete); state = task.execute(state); } @@ -97,7 +97,7 @@ public List fromXContent(XContentParser parser) throws IOE Map source = parser.map(); for (var entry : source.entrySet()) { - PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(entry.getKey()); + PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(DUMMY_TIMEOUT, DUMMY_TIMEOUT, entry.getKey()); @SuppressWarnings("unchecked") Map content = (Map) entry.getValue(); try (XContentParser repoParser = mapToXContentParser(XContentParserConfiguration.EMPTY, content)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index 971e1af5ea1bd..0876e48f929c8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -29,17 +30,17 @@ public VerifyRepositoryRequest(StreamInput in) throws IOException { name = in.readString(); } - public VerifyRepositoryRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public VerifyRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } /** * Constructs a new unregister repository request with the provided name. 
* - * @param name name of the repository + * @param name name of the repository */ - public VerifyRepositoryRequest(String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public VerifyRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + this(masterNodeTimeout, ackTimeout); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index 798fad15734ed..d756ca93133c3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; /** * Builder for verify repository request @@ -22,8 +23,8 @@ public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBu /** * Constructs unregister repository request builder with specified repository name */ - public VerifyRepositoryRequestBuilder(ElasticsearchClient client, String name) { - super(client, VerifyRepositoryAction.INSTANCE, new VerifyRepositoryRequest(name)); + public VerifyRepositoryRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(client, VerifyRepositoryAction.INSTANCE, new VerifyRepositoryRequest(masterNodeTimeout, ackTimeout, name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java index 2c7f1a703b64d..dafd35f09f12f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -52,8 +53,8 @@ public CloneSnapshotRequest(StreamInput in) throws IOException { * @param target target snapshot name * @param indices indices to clone from source to target */ - public CloneSnapshotRequest(String repository, String source, String target, String[] indices) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public CloneSnapshotRequest(TimeValue masterNodeTimeout, String repository, String source, String target, String[] indices) { + super(masterNodeTimeout); this.repository = repository; this.source = source; this.target = target; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index 818f0fadf92ef..a2726505dd834 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -13,14 +13,25 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.TimeValue; public class CloneSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< CloneSnapshotRequest, AcknowledgedResponse, CloneSnapshotRequestBuilder> { - public CloneSnapshotRequestBuilder(ElasticsearchClient client, String repository, String source, String target) { - super(client, TransportCloneSnapshotAction.TYPE, new CloneSnapshotRequest(repository, source, target, Strings.EMPTY_ARRAY)); + public CloneSnapshotRequestBuilder( + ElasticsearchClient client, + TimeValue masterNodeTimeout, + String repository, + String source, + String target + ) { + super( + client, + TransportCloneSnapshotAction.TYPE, + new CloneSnapshotRequest(masterNodeTimeout, repository, source, target, Strings.EMPTY_ARRAY) + ); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index c2fd49eb91a42..2c460319e3d86 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -81,8 +82,8 @@ public class CreateSnapshotRequest extends MasterNodeRequest userMetadata; - public CreateSnapshotRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public CreateSnapshotRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } /** @@ -91,8 +92,8 @@ public CreateSnapshotRequest() { * @param repository repository name * @param snapshot snapshot name */ - public CreateSnapshotRequest(String repository, String snapshot) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public CreateSnapshotRequest(TimeValue masterNodeTimeout, String repository, String snapshot) { + this(masterNodeTimeout); this.snapshot = snapshot; this.repository = repository; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 7f093b577fd57..983bb6e5d3a7c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import java.util.Map; @@ -26,8 +27,8 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil /** * Constructs a new create snapshot 
request builder with specified repository and snapshot names */ - public CreateSnapshotRequestBuilder(ElasticsearchClient client, String repository, String snapshot) { - super(client, TransportCreateSnapshotAction.TYPE, new CreateSnapshotRequest(repository, snapshot)); + public CreateSnapshotRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, String repository, String snapshot) { + super(client, TransportCreateSnapshotAction.TYPE, new CreateSnapshotRequest(masterNodeTimeout, repository, snapshot)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index 2356087d64e41..771bab8d4d6b2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Arrays; @@ -39,8 +40,8 @@ public class DeleteSnapshotRequest extends MasterNodeRequest { - public GetSnapshottableFeaturesRequest() { - - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetSnapshottableFeaturesRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public GetSnapshottableFeaturesRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java index 0dcd5762b0b08..ca867625272f9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -22,8 +23,8 @@ public static ResetFeatureStateRequest fromStream(StreamInput in) throws IOExcep return new ResetFeatureStateRequest(in); } - public ResetFeatureStateRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public ResetFeatureStateRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } private ResetFeatureStateRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 2aa64fcf0bad6..8ef828d07d8b0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.sort.SortOrder; import 
org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -76,8 +77,8 @@ public class GetSnapshotsRequest extends MasterNodeRequest private boolean includeIndexNames = true; - public GetSnapshotsRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetSnapshotsRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } /** @@ -86,9 +87,8 @@ public GetSnapshotsRequest() { * @param repositories repository names * @param snapshots list of snapshots */ - public GetSnapshotsRequest(String[] repositories, String[] snapshots) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); - this.repositories = repositories; + public GetSnapshotsRequest(TimeValue masterNodeTimeout, String[] repositories, String[] snapshots) { + this(masterNodeTimeout, repositories); this.snapshots = snapshots; } @@ -97,8 +97,8 @@ public GetSnapshotsRequest(String[] repositories, String[] snapshots) { * * @param repositories repository names */ - public GetSnapshotsRequest(String... repositories) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetSnapshotsRequest(TimeValue masterNodeTimeout, String... repositories) { + this(masterNodeTimeout); this.repositories = repositories; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index f3ef2fa0bda1e..54583a3ba23da 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.sort.SortOrder; /** @@ -25,8 +26,8 @@ public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilde /** * Constructs the new get snapshot request with specified repositories */ - public GetSnapshotsRequestBuilder(ElasticsearchClient client, String... repositories) { - super(client, TransportGetSnapshotsAction.TYPE, new GetSnapshotsRequest(repositories)); + public GetSnapshotsRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, String... 
repositories) { + super(client, TransportGetSnapshotsAction.TYPE, new GetSnapshotsRequest(masterNodeTimeout, repositories)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java index 7a7cc0c304556..96bedfdbfd108 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -28,8 +29,8 @@ public class GetShardSnapshotRequest extends MasterNodeRequest repositories; private final ShardId shardId; - GetShardSnapshotRequest(List repositories, ShardId shardId) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + GetShardSnapshotRequest(TimeValue masterNodeTimeout, List repositories, ShardId shardId) { + super(masterNodeTimeout); assert repositories.isEmpty() == false; assert repositories.stream().noneMatch(Objects::isNull); assert repositories.size() == 1 || repositories.stream().noneMatch(repo -> repo.equals(ALL_REPOSITORIES)); @@ -50,11 +51,15 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); } - public static GetShardSnapshotRequest latestSnapshotInAllRepositories(ShardId shardId) { - return new GetShardSnapshotRequest(Collections.singletonList(ALL_REPOSITORIES), shardId); + public static GetShardSnapshotRequest latestSnapshotInAllRepositories(TimeValue masterNodeTimeout, ShardId shardId) { + return new GetShardSnapshotRequest(masterNodeTimeout, Collections.singletonList(ALL_REPOSITORIES), shardId); } - public static GetShardSnapshotRequest latestSnapshotInRepositories(ShardId shardId, List repositories) { + public static GetShardSnapshotRequest latestSnapshotInRepositories( + TimeValue masterNodeTimeout, + ShardId shardId, + List repositories + ) { if (repositories.isEmpty()) { throw new IllegalArgumentException("Expected at least 1 repository but got none"); } @@ -62,7 +67,7 @@ public static GetShardSnapshotRequest latestSnapshotInRepositories(ShardId shard if (repositories.stream().anyMatch(Objects::isNull)) { throw new NullPointerException("null values are not allowed in the repository list"); } - return new GetShardSnapshotRequest(repositories, shardId); + return new GetShardSnapshotRequest(masterNodeTimeout, repositories, shardId); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 674fe117410e5..f0d47813dad77 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentObject; import 
org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -63,8 +64,13 @@ public class RestoreSnapshotRequest extends MasterNodeRequest { + private static final Logger logger = LogManager.getLogger(LazyRolloverAction.class); + public static final NodeFeature DATA_STREAM_LAZY_ROLLOVER = new NodeFeature("data_stream.rollover.lazy"); public static final LazyRolloverAction INSTANCE = new LazyRolloverAction(); @@ -50,6 +67,8 @@ public String name() { public static final class TransportLazyRolloverAction extends TransportRolloverAction { + private final MasterServiceTaskQueue lazyRolloverTaskQueue; + @Inject public TransportLazyRolloverAction( TransportService transportService, @@ -76,6 +95,11 @@ public TransportLazyRolloverAction( metadataDataStreamsService, dataStreamAutoShardingService ); + this.lazyRolloverTaskQueue = clusterService.createTaskQueue( + "lazy-rollover", + Priority.NORMAL, + new LazyRolloverExecutor(clusterService, allocationService, rolloverService, threadPool) + ); } @Override @@ -93,13 +117,20 @@ protected void masterOperation( : "The auto rollover action does not expect any other parameters in the request apart from the data stream name"; Metadata metadata = clusterState.metadata(); + DataStream dataStream = metadata.dataStreams().get(rolloverRequest.getRolloverTarget()); + // Skip submitting the task if we detect that the lazy rollover has been already executed. + if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { + DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + listener.onResponse(noopLazyRolloverResponse(targetIndices)); + return; + } // We evaluate the names of the source index as well as what our newly created index would be. 
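
The early return above hinges on isLazyRolloverNeeded, whose body falls outside this hunk. A plausible minimal sketch, consistent with its call sites in this class and assuming the rollover-on-write flag is exposed on DataStream.DataStreamIndices as isRolloverOnWrite():

// Sketch only: the accessor name is an assumption, not taken from this diff.
private static boolean isLazyRolloverNeeded(DataStream dataStream, boolean failureStore) {
    // A lazy rollover is pending only while the target write index is still marked
    // rollover-on-write; once any rollover has run, the flag is cleared and the
    // request can be answered with a no-op response.
    return dataStream.getDataStreamIndices(failureStore).isRolloverOnWrite();
}
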
final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, rolloverRequest.getRolloverTarget(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices() + rolloverRequest.targetsFailureStore() ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); @@ -107,24 +138,174 @@ protected void masterOperation( assert metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()) : "Auto-rollover applies only to data streams"; - final RolloverResponse trialRolloverResponse = new RolloverResponse( - trialSourceIndexName, - trialRolloverIndexName, - Map.of(), - false, - false, - false, - false, - false - ); - String source = "lazy_rollover source [" + trialSourceIndexName + "] to target [" + trialRolloverIndexName + "]"; // We create a new rollover request to ensure that it doesn't contain any other parameters apart from the data stream name // This will provide a more resilient user experience var newRolloverRequest = new RolloverRequest(rolloverRequest.getRolloverTarget(), null); newRolloverRequest.setIndicesOptions(rolloverRequest.indicesOptions()); - RolloverTask rolloverTask = new RolloverTask(newRolloverRequest, null, trialRolloverResponse, null, listener); - submitRolloverTask(rolloverRequest, source, rolloverTask); + LazyRolloverTask rolloverTask = new LazyRolloverTask(newRolloverRequest, listener); + lazyRolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); + } + } + + /** + * A lazy rollover task holds the rollover request and the listener. + */ + record LazyRolloverTask(RolloverRequest rolloverRequest, ActionListener listener) + implements + ClusterStateTaskListener { + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); } } + + /** + * Performs a lazy rollover when required and notifies the listener. Due to the nature of the lazy rollover we are able + * to perform certain optimisations like identifying duplicate requests and executing them once. This is an optimisation + * that can work since we do not take into consideration any stats or auto-sharding conditions here. 
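A minimal, self-contained sketch of the deduplication idiom this javadoc describes, mirroring the grouping the executor performs below (Request and Task are hypothetical stand-ins, not the real RolloverRequest or task-context types):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class LazyBatchDedupSketch {
    record Request(String target) {}
    record Task(Request request) {}

    // Group the batch by equal requests so each distinct request is executed once and
    // its single result is fanned out to every task that submitted a duplicate.
    static Map<Request, List<Task>> groupByRequest(List<Task> batch) {
        Map<Request, List<Task>> grouped = new HashMap<>();
        for (Task task : batch) {
            grouped.computeIfAbsent(task.request(), ignored -> new ArrayList<>()).add(task);
        }
        return grouped;
    }
}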
+ */ + record LazyRolloverExecutor( + ClusterService clusterService, + AllocationService allocationService, + MetadataRolloverService rolloverService, + ThreadPool threadPool + ) implements ClusterStateTaskExecutor { + + @Override + public ClusterState execute(BatchExecutionContext batchExecutionContext) { + final var listener = new AllocationActionMultiListener(threadPool.getThreadContext()); + final var results = new ArrayList(batchExecutionContext.taskContexts().size()); + var state = batchExecutionContext.initialState(); + Map>> groupedRequests = new HashMap<>(); + for (final var taskContext : batchExecutionContext.taskContexts()) { + groupedRequests.computeIfAbsent(taskContext.getTask().rolloverRequest(), ignored -> new ArrayList<>()).add(taskContext); + } + for (final var entry : groupedRequests.entrySet()) { + List> rolloverTaskContexts = entry.getValue(); + try { + RolloverRequest rolloverRequest = entry.getKey(); + state = executeTask(state, rolloverRequest, results, rolloverTaskContexts, listener); + } catch (Exception e) { + rolloverTaskContexts.forEach(taskContext -> taskContext.onFailure(e)); + } finally { + rolloverTaskContexts.forEach(taskContext -> taskContext.captureResponseHeaders().close()); + } + } + + if (state != batchExecutionContext.initialState()) { + var reason = new StringBuilder(); + Strings.collectionToDelimitedStringWithLimit( + (Iterable) () -> Iterators.map(results.iterator(), t -> t.sourceIndexName() + "->" + t.rolloverIndexName()), + ",", + "lazy bulk rollover [", + "]", + 1024, + reason + ); + try (var ignored = batchExecutionContext.dropHeadersContext()) { + state = allocationService.reroute(state, reason.toString(), listener.reroute()); + } + } else { + listener.noRerouteNeeded(); + } + return state; + } + + public ClusterState executeTask( + ClusterState currentState, + RolloverRequest rolloverRequest, + List results, + List> rolloverTaskContexts, + AllocationActionMultiListener allocationActionMultiListener + ) throws Exception { + + // If the data stream has been rolled over since it was marked for lazy rollover, this operation is a noop + final DataStream dataStream = currentState.metadata().dataStreams().get(rolloverRequest.getRolloverTarget()); + assert dataStream != null; + + if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { + final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + var noopResponse = noopLazyRolloverResponse(targetIndices); + notifyAllListeners(rolloverTaskContexts, context -> context.getTask().listener.onResponse(noopResponse)); + return currentState; + } + + // Perform the actual rollover + final var rolloverResult = rolloverService.rolloverClusterState( + currentState, + rolloverRequest.getRolloverTarget(), + rolloverRequest.getNewIndexName(), + rolloverRequest.getCreateIndexRequest(), + List.of(), + Instant.now(), + false, + false, + null, + null, + rolloverRequest.targetsFailureStore() + ); + results.add(rolloverResult); + logger.trace("lazy rollover result [{}]", rolloverResult); + + final var rolloverIndexName = rolloverResult.rolloverIndexName(); + final var sourceIndexName = rolloverResult.sourceIndexName(); + + final var waitForActiveShardsTimeout = rolloverRequest.masterNodeTimeout().millis() < 0 + ? 
null + : rolloverRequest.masterNodeTimeout(); + + notifyAllListeners(rolloverTaskContexts, context -> { + // Now assuming we have a new state and the name of the rolled over index, we need to wait for the configured number of + // active shards, as well as return the names of the indices that were rolled/created + ActiveShardsObserver.waitForActiveShards( + clusterService, + new String[] { rolloverIndexName }, + rolloverRequest.getCreateIndexRequest().waitForActiveShards(), + waitForActiveShardsTimeout, + allocationActionMultiListener.delay(context.getTask().listener()) + .map( + isShardsAcknowledged -> new RolloverResponse( + // Note that we use the actual rollover result for these, because even though we're single threaded, + // it's possible for the rollover names generated before the actual rollover to be different due to + // things like date resolution + sourceIndexName, + rolloverIndexName, + Map.of(), + false, + true, + true, + isShardsAcknowledged, + false + ) + ) + ); + }); + + // Return the new rollover cluster state, which includes the changes that create the new index + return rolloverResult.clusterState(); + } + } + + /** + * A lazy rollover is only needed if the data stream is marked to rollover on write or if it targets the failure store + * and the failure store is empty. + */ + private static boolean isLazyRolloverNeeded(DataStream dataStream, boolean failureStore) { + DataStream.DataStreamIndices indices = dataStream.getDataStreamIndices(failureStore); + return indices.isRolloverOnWrite() || (failureStore && indices.getIndices().isEmpty()); + } + + private static void notifyAllListeners( + List> taskContexts, + Consumer> onPublicationSuccess + ) { + taskContexts.forEach(context -> context.success(() -> onPublicationSuccess.accept(context))); + } + + private static RolloverResponse noopLazyRolloverResponse(DataStream.DataStreamIndices indices) { + String latestWriteIndex = indices.getWriteIndex().getName(); + return new RolloverResponse(latestWriteIndex, latestWriteIndex, Map.of(), false, false, true, true, false); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index dea772cc893f2..6302b1c9ef9fb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.RestApiVersion; @@ -182,6 +183,13 @@ public IndicesOptions indicesOptions() { return indicesOptions; } + /** + * @return true if the rollover request targets the failure store, false otherwise. 
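Taken together, targetsFailureStore() here and isLazyRolloverNeeded(..) above reduce to a small predicate; a hedged standalone restatement with primitive inputs (parameter names are illustrative):

class LazyRolloverNeededSketch {
    // A lazy rollover is needed only if the targeted indices are flagged rollover-on-write,
    // or the request targets a failure store that has no failure indices yet.
    static boolean isNeeded(boolean rolloverOnWrite, boolean targetsFailureStore, int failureIndexCount) {
        return rolloverOnWrite || (targetsFailureStore && failureIndexCount == 0);
    }
}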
+ */ + public boolean targetsFailureStore() { + return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.failureStoreOptions().includeFailureIndices(); + } + public void setIndicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; } @@ -192,14 +200,14 @@ public boolean includeDataStreams() { } /** - * Sets the rollover target to rollover to another index + * Sets the rollover target to roll over to another index */ public void setRolloverTarget(String rolloverTarget) { this.rolloverTarget = rolloverTarget; } /** - * Sets the alias to rollover to another index + * Sets the alias to roll over to another index */ public void setNewIndexName(String newIndexName) { this.newIndexName = newIndexName; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 34da6795cd5f2..d76cfedd279b5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -169,7 +169,7 @@ protected void masterOperation( assert task instanceof CancellableTask; Metadata metadata = clusterState.metadata(); // We evaluate the names of the index for which we should evaluate conditions, as well as what our newly created index *would* be. - boolean targetFailureStore = rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices(); + boolean targetFailureStore = rolloverRequest.targetsFailureStore(); final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, rolloverRequest.getRolloverTarget(), @@ -335,7 +335,7 @@ protected void masterOperation( rolloverAutoSharding, delegate ); - submitRolloverTask(rolloverRequest, source, rolloverTask); + rolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); } else { // conditions not met delegate.onResponse(trialRolloverResponse); @@ -375,10 +375,6 @@ private void initializeFailureStore( String source = "initialize_failure_store with index [" + trialRolloverIndexName + "]"; RolloverTask rolloverTask = new RolloverTask(rolloverRequest, null, trialRolloverResponse, null, listener); - submitRolloverTask(rolloverRequest, source, rolloverTask); - } - - void submitRolloverTask(RolloverRequest rolloverRequest, String source, RolloverTask rolloverTask) { rolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); } @@ -501,7 +497,7 @@ public ClusterState executeTask( rolloverRequest.getRolloverTarget(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices() + rolloverRequest.targetsFailureStore() ); // Re-evaluate the conditions, now with our final source index name @@ -552,7 +548,7 @@ public ClusterState executeTask( false, sourceIndexStats, rolloverTask.autoShardingResult(), - rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices() + rolloverRequest.targetsFailureStore() ); results.add(rolloverResult); logger.trace("rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index e0a28e635a0a3..b9f753189c077 100644 --- 
a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -214,12 +214,12 @@ private void rollOverFailureStores(Runnable runnable) { @Override public void onResponse(RolloverResponse result) { - // A successful response has rolled_over false when in the following cases: - // - A request had the parameter lazy or dry_run enabled - // - A request had conditions that were not met - // Since none of the above apply, getting a response with rolled_over false is considered a bug - // that should be caught here and inform the developer. - assert result.isRolledOver() : "An successful lazy rollover should always result in a rolled over data stream"; + logger.debug( + "Data stream failure store {} has {} over, the latest index is {}", + dataStream, + result.isRolledOver() ? "been successfully rolled" : "skipped rolling", + result.getNewIndex() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 4fc17407ae6d0..b14a63362cb9f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -534,12 +534,13 @@ private void rollOverDataStreams( @Override public void onResponse(RolloverResponse result) { - // A successful response has rolled_over false when in the following cases: - // - A request had the parameter lazy or dry_run enabled - // - A request had conditions that were not met - // Since none of the above apply, getting a response with rolled_over false is considered a bug - // that should be caught here and inform the developer. - assert result.isRolledOver() : "An successful lazy rollover should always result in a rolled over data stream"; + logger.debug( + "Data stream{} {} has {} over, the latest index is {}", + rolloverRequest.targetsFailureStore() ? " failure store" : "", + dataStream, + result.isRolledOver() ? 
"been successfully rolled" : "skipped rolling", + result.getNewIndex() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index 4e42de57d08d3..f14a2f6fb5247 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -122,8 +122,11 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; import org.elasticsearch.action.ingest.SimulatePipelineResponse; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -295,28 +298,46 @@ public void putRepository(PutRepositoryRequest request, ActionListener listener) { execute(TransportDeleteRepositoryAction.TYPE, request, listener); } + public DeleteRepositoryRequestBuilder prepareDeleteRepository(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + return new DeleteRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, name); + } + + @Deprecated(forRemoval = true) // temporary compatibility shim public DeleteRepositoryRequestBuilder prepareDeleteRepository(String name) { - return new DeleteRepositoryRequestBuilder(this, name); + return prepareDeleteRepository( + MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, + AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, + name + ); } public void getRepositories(GetRepositoriesRequest request, ActionListener listener) { execute(GetRepositoriesAction.INSTANCE, request, listener); } - public GetRepositoriesRequestBuilder prepareGetRepositories(String... name) { - return new GetRepositoriesRequestBuilder(this, name); + public GetRepositoriesRequestBuilder prepareGetRepositories(TimeValue masterNodeTimeout, String... name) { + return new GetRepositoriesRequestBuilder(this, masterNodeTimeout, name); } - public CleanupRepositoryRequestBuilder prepareCleanupRepository(String repository) { - return new CleanupRepositoryRequestBuilder(this, repository); + public CleanupRepositoryRequestBuilder prepareCleanupRepository(TimeValue masterNodeTimeout, TimeValue ackTimeout, String repository) { + return new CleanupRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, repository); } public void cleanupRepository(CleanupRepositoryRequest request, ActionListener listener) { @@ -327,8 +348,8 @@ public void verifyRepository(VerifyRepositoryRequest request, ActionListener createSnapshot(CreateSnapshotRequest request) { @@ -339,12 +360,17 @@ public void createSnapshot(CreateSnapshotRequest request, ActionListener listener) { @@ -355,16 +381,26 @@ public void getSnapshots(GetSnapshotsRequest request, ActionListener listener) { execute(TransportDeleteSnapshotAction.TYPE, request, listener); } + @Deprecated(forRemoval = true) // temporary compatibility shim public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, String... 
names) { - return new DeleteSnapshotRequestBuilder(this, repository, names); + return prepareDeleteSnapshot(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, repository, names); + } + + public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(TimeValue masterNodeTimeout, String repository, String... names) { + return new DeleteSnapshotRequestBuilder(this, masterNodeTimeout, repository, names); } public ActionFuture restoreSnapshot(RestoreSnapshotRequest request) { @@ -375,20 +411,25 @@ public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener listener) { execute(TransportSnapshotsStatusAction.TYPE, request, listener); } - public SnapshotsStatusRequestBuilder prepareSnapshotStatus(String repository) { - return new SnapshotsStatusRequestBuilder(this, repository); + public SnapshotsStatusRequestBuilder prepareSnapshotStatus(TimeValue masterNodeTimeout, String repository) { + return new SnapshotsStatusRequestBuilder(this, masterNodeTimeout, repository); } - public SnapshotsStatusRequestBuilder prepareSnapshotStatus() { - return new SnapshotsStatusRequestBuilder(this); + public SnapshotsStatusRequestBuilder prepareSnapshotStatus(TimeValue masterNodeTimeout) { + return new SnapshotsStatusRequestBuilder(this, masterNodeTimeout); } public void putPipeline(PutPipelineRequest request, ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index affc331c5ab49..6d99874fd2edb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -805,9 +805,7 @@ static void validateLifecycle( ComposableIndexTemplate template, @Nullable DataStreamGlobalRetention globalRetention ) { - DataStreamLifecycle lifecycle = template.template() != null && template.template().lifecycle() != null - ? template.template().lifecycle() - : resolveLifecycle(template, metadata.componentTemplates()); + DataStreamLifecycle lifecycle = resolveLifecycle(template, metadata.componentTemplates()); if (lifecycle != null) { if (template.getDataStreamTemplate() == null) { throw new IllegalArgumentException( diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 28df2fad32cbb..9998cb55064e3 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -748,24 +748,4 @@ public static XContentParser mapToXContentParser(XContentParserConfiguration con throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); } } - - /** - * Drains all data available via this parser into a provided builder. - * Provided parser is closed as a result. 
- * @param parser - * @param destination - */ - public static void drainAndClose(XContentParser parser, XContentBuilder destination) throws IOException { - if (parser.isClosed()) { - throw new IllegalStateException("Can't drain a parser that is closed"); - } - - XContentParser.Token token; - do { - destination.copyCurrentStructure(parser); - token = parser.nextToken(); - } while (token != null); - - parser.close(); - } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoader.java new file mode 100644 index 0000000000000..efc3c7b507300 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompositeSyntheticFieldLoader.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +import static java.util.Collections.emptyList; + +/** + * A {@link SourceLoader.SyntheticFieldLoader} that uses a set of sub-loaders + * to produce synthetic source for the field. + * Typical use case is to gather field values from doc_values and append malformed values + * stored in a different field in case of ignore_malformed being enabled. + */ +public class CompositeSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private final String fieldName; + private final String fullFieldName; + private final SyntheticFieldLoaderLayer[] parts; + private boolean hasValue; + + public CompositeSyntheticFieldLoader(String fieldName, String fullFieldName, SyntheticFieldLoaderLayer... 
parts) { + this.fieldName = fieldName; + this.fullFieldName = fullFieldName; + this.parts = parts; + this.hasValue = false; + } + + @Override + public Stream> storedFieldLoaders() { + return Arrays.stream(parts).flatMap(SyntheticFieldLoaderLayer::storedFieldLoaders).map(e -> Map.entry(e.getKey(), values -> { + hasValue = true; + e.getValue().load(values); + })); + } + + @Override + public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { + var loaders = new ArrayList(parts.length); + for (var part : parts) { + var partLoader = part.docValuesLoader(leafReader, docIdsInLeaf); + if (partLoader != null) { + loaders.add(partLoader); + } + } + + if (loaders.isEmpty()) { + return null; + } + + return docId -> { + boolean hasDocs = false; + for (var loader : loaders) { + hasDocs |= loader.advanceToDoc(docId); + } + + this.hasValue |= hasDocs; + return hasDocs; + }; + } + + @Override + public boolean hasValue() { + return hasValue; + } + + @Override + public void write(XContentBuilder b) throws IOException { + var totalCount = Arrays.stream(parts).mapToLong(SyntheticFieldLoaderLayer::valueCount).sum(); + + if (totalCount == 0) { + return; + } + + if (totalCount == 1) { + b.field(fieldName); + for (var part : parts) { + part.write(b); + } + return; + } + + b.startArray(fieldName); + for (var part : parts) { + part.write(b); + } + b.endArray(); + } + + @Override + public String fieldName() { + return this.fullFieldName; + } + + /** + * Represents one layer of loading synthetic source values for a field + * as a part of {@link CompositeSyntheticFieldLoader}. + *
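As a rough illustration of the zero/one/many emission rule implemented by write(..) above, here is a sketch with plain strings standing in for XContentBuilder output (names are illustrative):

import java.util.List;

class EmissionRuleSketch {
    // Zero values emit nothing, a single value emits a bare field,
    // and multiple values (summed across all layers) emit an array.
    static String render(String field, List<String> values) {
        if (values.isEmpty()) {
            return "{}";
        }
        if (values.size() == 1) {
            return "{\"" + field + "\": " + values.get(0) + "}";
        }
        return "{\"" + field + "\": [" + String.join(", ", values) + "]}";
    }
}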
+ * Note that the contract of {@link SourceLoader.SyntheticFieldLoader#write(XContentBuilder)} + * is slightly different here since it only needs to write field values without encompassing object or array. + */ + public interface SyntheticFieldLoaderLayer extends SourceLoader.SyntheticFieldLoader { + /** + * Number of values that this loader will write. + * @return + */ + long valueCount(); + } + + /** + * Layer that loads malformed values stored in a dedicated field with a conventional name. + * @see IgnoreMalformedStoredValues + */ + public static class MalformedValuesLayer implements SyntheticFieldLoaderLayer { + private final String fieldName; + private List values; + + public MalformedValuesLayer(String fieldName) { + this.fieldName = IgnoreMalformedStoredValues.name(fieldName); + this.values = emptyList(); + } + + @Override + public long valueCount() { + return values.size(); + } + + @Override + public Stream> storedFieldLoaders() { + return Stream.of(Map.entry(fieldName, values -> this.values = values)); + } + + @Override + public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { + return null; + } + + @Override + public boolean hasValue() { + return values.isEmpty() == false; + } + + @Override + public void write(XContentBuilder b) throws IOException { + for (Object v : values) { + if (v instanceof BytesRef r) { + XContentDataHelper.decodeAndWrite(b, r); + } else { + b.value(v); + } + } + values = emptyList(); + } + + @Override + public String fieldName() { + return fieldName; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 034e8fd0770f3..0966698277723 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -632,6 +632,24 @@ private static void parseArrayDynamic(DocumentParserContext context, String curr } Mapper objectMapperFromTemplate = DynamicFieldsBuilder.createObjectMapperFromTemplate(context, currentFieldName); if (objectMapperFromTemplate == null) { + if (context.indexSettings().isIgnoreDynamicFieldsBeyondLimit() + && context.mappingLookup().exceedsLimit(context.indexSettings().getMappingTotalFieldsLimit(), 1)) { + if (context.canAddIgnoredField()) { + try { + context.addIgnoredField( + IgnoredSourceFieldMapper.NameValue.fromContext( + context, + currentFieldName, + XContentDataHelper.encodeToken(context.parser()) + ) + ); + } catch (IOException e) { + throw new IllegalArgumentException("failed to parse field [" + currentFieldName + " ]", e); + } + } + context.addIgnoredField(currentFieldName); + return; + } parseNonDynamicArray(context, objectMapperFromTemplate, currentFieldName, currentFieldName); } else { if (parsesArrayValue(objectMapperFromTemplate)) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java index 6ad0823738ba0..3d2c51fb5b8af 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java @@ -128,7 +128,7 @@ public void write(XContentBuilder b) throws IOException { } } - private static String name(String fieldName) { + public static String name(String fieldName) { return fieldName + "._ignore_malformed"; } } diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index f64511f8396ec..efbc75490550d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -10,10 +10,14 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Tuple; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -21,6 +25,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Map; /** @@ -145,6 +150,56 @@ static NameValue decode(Object field) { return new NameValue(name, parentOffset, value, null); } + public record MappedNameValue(NameValue nameValue, XContentType type, Map map) {} + + /** + * Parses the passed byte array as a NameValue and converts its decoded value to a map of maps that corresponds to the field-value + * subtree. There is only a single pair at the top level, with the key corresponding to the field name. If the field contains a single + * value, the map contains a single key-value pair. Otherwise, the value of the first pair will be another map etc. + * @param value encoded NameValue + * @return MappedNameValue with the parsed NameValue, the XContentType to use for serializing its contents and the field-value map. + * @throws IOException + */ + public static MappedNameValue decodeAsMap(byte[] value) throws IOException { + BytesRef bytes = new BytesRef(value); + IgnoredSourceFieldMapper.NameValue nameValue = IgnoredSourceFieldMapper.decode(bytes); + XContentBuilder xContentBuilder = XContentBuilder.builder(XContentDataHelper.getXContentType(nameValue.value()).xContent()); + xContentBuilder.startObject().field(nameValue.name()); + XContentDataHelper.decodeAndWrite(xContentBuilder, nameValue.value()); + xContentBuilder.endObject(); + Tuple> result = XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true); + return new MappedNameValue(nameValue, result.v1(), result.v2()); + } + + /** + * Clones the passed NameValue, using the passed map to produce its value. + * @param mappedNameValue containing the NameValue to clone + * @param map containing a simple field-value pair, or a deeper field-value subtree for objects and arrays with fields + * @return a byte array containing the encoding form of the cloned NameValue + * @throws IOException + */ + public static byte[] encodeFromMap(MappedNameValue mappedNameValue, Map map) throws IOException { + // The first entry is the field name, we skip to get to the value to encode. + assert map.size() == 1; + Object content = map.values().iterator().next(); + + // Check if the field contains a single value or an object. + @SuppressWarnings("unchecked") + XContentBuilder xContentBuilder = (content instanceof Map objectMap) + ? 
XContentBuilder.builder(mappedNameValue.type().xContent()).map((Map) objectMap) + : XContentBuilder.builder(mappedNameValue.type().xContent()).value(content); + + // Clone the NameValue with the updated value. + NameValue oldNameValue = mappedNameValue.nameValue(); + IgnoredSourceFieldMapper.NameValue filteredNameValue = new IgnoredSourceFieldMapper.NameValue( + oldNameValue.name(), + oldNameValue.parentOffset(), + XContentDataHelper.encodeXContentBuilder(xContentBuilder), + oldNameValue.doc() + ); + return IgnoredSourceFieldMapper.encode(filteredNameValue); + } + // This mapper doesn't contribute to source directly as it has no access to the object structure. Instead, its contents // are loaded by SourceLoader and passed to object mappers that, in turn, write their ignore fields at the appropriate level. @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java index 15caa7f5a6238..fa501a31045e7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java @@ -37,10 +37,6 @@ public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDat private final ObjectMapper.Dynamic dynamic; private final MergeReason mergeReason; - MapperBuilderContext(String path) { - this(path, false, false, false, ObjectMapper.Defaults.DYNAMIC, MergeReason.MAPPING_UPDATE); - } - MapperBuilderContext( String path, boolean isSourceSynthetic, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java index 48e04a938d2b2..da005217b1b2d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java @@ -25,10 +25,6 @@ private MapperMergeContext(MapperBuilderContext mapperBuilderContext, NewFieldsB this.newFieldsBudget = newFieldsBudget; } - static MapperMergeContext root(boolean isSourceSynthetic, boolean isDataStream, long newFieldsBudget) { - return root(isSourceSynthetic, isDataStream, MergeReason.MAPPING_UPDATE, newFieldsBudget); - } - /** * The root context, to be used when merging a tree of mappers */ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java index 254a0bc9c906b..d97e03d3874ee 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java @@ -92,6 +92,18 @@ static void decodeAndWrite(XContentBuilder b, BytesRef r) throws IOException { } } + /** + * Returns the {@link XContentType} to use for creating an XContentBuilder to decode the passed value. + */ + public static XContentType getXContentType(BytesRef r) { + return switch ((char) r.bytes[r.offset]) { + case JSON_OBJECT_ENCODING -> XContentType.JSON; + case YAML_OBJECT_ENCODING -> XContentType.YAML; + case SMILE_OBJECT_ENCODING -> XContentType.SMILE; + default -> XContentType.CBOR; // CBOR can parse all other encoded types. + }; + } + /** * Stores the current parser structure (subtree) to an {@link XContentBuilder} and returns it, along with a * {@link DocumentParserContext} wrapping it that can be used to reparse the subtree. 
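A standalone restatement of the first-byte dispatch in getXContentType above; the marker characters below are placeholders, since the real JSON/YAML/SMILE encoding constants are internal to XContentDataHelper:

class EncodingDispatchSketch {
    enum ContentType { JSON, YAML, SMILE, CBOR }

    // The first byte of an encoded value tags how the rest was serialized; any
    // unrecognised tag falls through to CBOR, which can parse the other encodings.
    static ContentType typeOf(byte[] encoded, int offset) {
        return switch ((char) encoded[offset]) {
            case 'J' -> ContentType.JSON;  // placeholder for JSON_OBJECT_ENCODING
            case 'Y' -> ContentType.YAML;  // placeholder for YAML_OBJECT_ENCODING
            case 'S' -> ContentType.SMILE; // placeholder for SMILE_OBJECT_ENCODING
            default -> ContentType.CBOR;
        };
    }
}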
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 88772169e260c..7d385c189479b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -28,15 +28,6 @@ import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.queries.function.FunctionQuery; -import org.apache.lucene.queries.function.valuesource.ByteKnnVectorFieldSource; -import org.apache.lucene.queries.function.valuesource.ByteVectorSimilarityFunction; -import org.apache.lucene.queries.function.valuesource.ConstKnnByteVectorValueSource; -import org.apache.lucene.queries.function.valuesource.ConstKnnFloatValueSource; -import org.apache.lucene.queries.function.valuesource.FloatKnnVectorFieldSource; -import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; @@ -67,6 +58,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.vectors.DenseVectorQuery; import org.elasticsearch.search.vectors.ESDiversifyingChildrenByteKnnVectorQuery; import org.elasticsearch.search.vectors.ESDiversifyingChildrenFloatKnnVectorQuery; import org.elasticsearch.search.vectors.ESKnnByteVectorQuery; @@ -1484,19 +1476,7 @@ private Query createExactKnnByteQuery(byte[] queryVector) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); elementType.checkVectorMagnitude(similarity, ElementType.errorByteElementsAppender(queryVector), squaredMagnitude); } - VectorSimilarityFunction vectorSimilarityFunction = similarity.vectorSimilarityFunction(indexVersionCreated, elementType); - return new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) - .add( - new FunctionQuery( - new ByteVectorSimilarityFunction( - vectorSimilarityFunction, - new ByteKnnVectorFieldSource(name()), - new ConstKnnByteVectorValueSource(queryVector) - ) - ), - BooleanClause.Occur.SHOULD - ) - .build(); + return new DenseVectorQuery.Bytes(queryVector, name()); } private Query createExactKnnFloatQuery(float[] queryVector) { @@ -1519,19 +1499,7 @@ && isNotUnitVector(squaredMagnitude)) { } } } - VectorSimilarityFunction vectorSimilarityFunction = similarity.vectorSimilarityFunction(indexVersionCreated, elementType); - return new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) - .add( - new FunctionQuery( - new FloatVectorSimilarityFunction( - vectorSimilarityFunction, - new FloatKnnVectorFieldSource(name()), - new ConstKnnFloatValueSource(queryVector) - ) - ), - BooleanClause.Occur.SHOULD - ) - .build(); + return new DenseVectorQuery.Floats(queryVector, name()); } Query createKnnQuery(float[] queryVector, int numCands, Query filter, Float similarityThreshold, BitSetProducer parentFilter) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java 
b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java index 6e53b8416ebd3..c414f7c100633 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java @@ -18,6 +18,7 @@ import org.apache.lucene.store.Lock; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotResponse; @@ -31,6 +32,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.elasticsearch.index.store.StoreFileMetadata; @@ -91,7 +93,13 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener systemLoaderURLs, PluginBundle bundle, M Set pluginUrls = transitiveUrls.get(extendedPlugin); assert pluginUrls != null : "transitive urls should have already been set for " + extendedPlugin; - // consistency check: extended plugins should not have duplicate codebases with each other - Set intersection = new HashSet<>(extendedPluginUrls); - intersection.retainAll(pluginUrls); - if (intersection.isEmpty() == false) { - throw new IllegalStateException( - "jar hell! extended plugins " + exts + " have duplicate codebases with each other: " + intersection - ); - } - // jar hell check: extended plugins (so far) do not have jar hell with each other extendedPluginUrls.addAll(pluginUrls); JarHell.checkJarHell(extendedPluginUrls, logger::debug); // consistency check: each extended plugin should not have duplicate codebases with implementation+spi of this plugin - intersection = new HashSet<>(bundle.allUrls); + Set intersection = new HashSet<>(bundle.allUrls); intersection.retainAll(pluginUrls); if (intersection.isEmpty() == false) { throw new IllegalStateException( diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java index 50aa7881cd2b6..85f06580cee79 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java @@ -16,6 +16,7 @@ public record RepositoriesMetrics( MeterRegistry meterRegistry, LongCounter requestCounter, LongCounter exceptionCounter, + LongCounter requestRangeNotSatisfiedExceptionCounter, LongCounter throttleCounter, LongCounter operationCounter, LongCounter unsuccessfulOperationCounter, @@ -28,6 +29,8 @@ public record RepositoriesMetrics( public static final String METRIC_REQUESTS_TOTAL = "es.repositories.requests.total"; public static final String METRIC_EXCEPTIONS_TOTAL = "es.repositories.exceptions.total"; + public static final String METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL = + "es.repositories.exceptions.request_range_not_satisfied.total"; public static final String METRIC_THROTTLES_TOTAL = "es.repositories.throttles.total"; public static final String METRIC_OPERATIONS_TOTAL = "es.repositories.operations.total"; public static final String 
METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL = "es.repositories.operations.unsuccessful.total"; @@ -40,6 +43,11 @@ public RepositoriesMetrics(MeterRegistry meterRegistry) { meterRegistry, meterRegistry.registerLongCounter(METRIC_REQUESTS_TOTAL, "repository request counter", "unit"), meterRegistry.registerLongCounter(METRIC_EXCEPTIONS_TOTAL, "repository request exception counter", "unit"), + meterRegistry.registerLongCounter( + METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL, + "repository request RequestedRangeNotSatisfiedException counter", + "unit" + ), meterRegistry.registerLongCounter(METRIC_THROTTLES_TOTAL, "repository request throttle counter", "unit"), meterRegistry.registerLongCounter(METRIC_OPERATIONS_TOTAL, "repository operation counter", "unit"), meterRegistry.registerLongCounter(METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, "repository unsuccessful operation counter", "unit"), diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 181fe6afb97d9..c63be88215655 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -495,8 +495,8 @@ public static class UnregisterRepositoryTask extends AckedClusterStateUpdateTask * Constructor used by {@link org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction} * @param name the repository name */ - public UnregisterRepositoryTask(String name) { - this(new DeleteRepositoryRequest(name), null); + public UnregisterRepositoryTask(TimeValue dummyTimeout, String name) { + this(new DeleteRepositoryRequest(dummyTimeout, dummyTimeout, name), null); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e27ba56bed974..8f55bf16c1674 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -22,6 +22,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.SingleResultDeduplicator; @@ -58,8 +59,15 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.compress.DeflateCompressor; import org.elasticsearch.common.compress.NotXContentException; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.metrics.CounterMetric; @@ -77,6 +85,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import 
org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; @@ -122,16 +131,21 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.Closeable; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.io.UncheckedIOException; import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Collection; @@ -156,6 +170,8 @@ import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import java.util.zip.DeflaterOutputStream; +import java.util.zip.InflaterInputStream; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; @@ -1010,10 +1026,35 @@ class SnapshotsDeletion { // The overall flow of execution void runDelete(SnapshotDeleteListener listener) { + final var releasingListener = new SnapshotDeleteListener() { + @Override + public void onDone() { + try { + shardBlobsToDelete.close(); + } finally { + listener.onDone(); + } + } + + @Override + public void onRepositoryDataWritten(RepositoryData repositoryData) { + listener.onRepositoryDataWritten(repositoryData); + } + + @Override + public void onFailure(Exception e) { + try { + shardBlobsToDelete.close(); + } finally { + listener.onFailure(e); + } + + } + }; if (useShardGenerations) { - runWithUniqueShardMetadataNaming(listener); + runWithUniqueShardMetadataNaming(releasingListener); } else { - runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(listener)); + runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(releasingListener)); } } @@ -1088,14 +1129,15 @@ void runCleanup(ActionListener listener) { .map(IndexId::getId) .collect(Collectors.toSet()); final List staleRootBlobs = staleRootBlobs(originalRepositoryData, originalRootBlobs.keySet()); + final var releasingListener = ActionListener.releaseAfter(listener, shardBlobsToDelete); if (survivingIndexIds.equals(originalIndexContainers.keySet()) && staleRootBlobs.isEmpty()) { // Nothing to clean up we return - listener.onResponse(DeleteResult.ZERO); + releasingListener.onResponse(DeleteResult.ZERO); } else { // write new index-N blob to ensure concurrent operations will fail updateRepositoryData( originalRepositoryData, - listener.delegateFailureAndWrap( + releasingListener.delegateFailureAndWrap( // TODO should we pass newRepositoryData to cleanupStaleBlobs()? (l, newRepositoryData) -> cleanupUnlinkedRootAndIndicesBlobs( originalRepositoryData, @@ -1513,33 +1555,62 @@ private void logStaleRootLevelBlobs( /** * Tracks the shard-level blobs which can be deleted once all the metadata updates have completed during a snapshot deletion. 
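The runDelete change above wraps the incoming listener so that shardBlobsToDelete is closed on every completion path; a minimal sketch of that wrap-then-release idiom (Listener is a hypothetical stand-in for SnapshotDeleteListener):

class ReleasingListenerSketch {
    interface Listener {
        void onDone();
        void onFailure(Exception e);
    }

    // The resource is closed before the delegate fires, and the delegate is still
    // notified (via finally) even if closing the resource throws.
    static Listener releasing(Listener delegate, AutoCloseable resource) {
        return new Listener() {
            @Override
            public void onDone() {
                try {
                    closeResource();
                } finally {
                    delegate.onDone();
                }
            }

            @Override
            public void onFailure(Exception e) {
                try {
                    closeResource();
                } finally {
                    delegate.onFailure(e);
                }
            }

            private void closeResource() {
                try {
                    resource.close();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        };
    }
}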
*/ - class ShardBlobsToDelete { + class ShardBlobsToDelete implements Releasable { /** * The result of removing a snapshot from a shard folder in the repository. * - * @param indexId Index that the snapshot was removed from + * @param indexId Repository UUID for index that the snapshot was removed from * @param shardId Shard id that the snapshot was removed from - * @param newGeneration Id of the new index-${uuid} blob that does not include the snapshot any more * @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation */ - private record ShardSnapshotMetaDeleteResult( - IndexId indexId, - int shardId, - ShardGeneration newGeneration, - Collection blobsToDelete - ) {} + private record ShardSnapshotMetaDeleteResult(String indexId, int shardId, Collection blobsToDelete) { + ShardSnapshotMetaDeleteResult(StreamInput in) throws IOException { + this(in.readString(), in.readVInt(), in.readStringCollectionAsImmutableList()); + assert in.getTransportVersion().equals(TransportVersion.current()); // only used in memory on the local node + } + + void writeTo(StreamOutput out) throws IOException { + assert out.getTransportVersion().equals(TransportVersion.current()); // only used in memory on the local node + out.writeString(indexId); + out.writeVInt(shardId); + out.writeStringCollection(blobsToDelete); + } + } /** *
<p> - * Shard-level results, see {@link ShardSnapshotMetaDeleteResult}. + * Shard-level results, i.e. a sequence of {@link ShardSnapshotMetaDeleteResult} objects, except serialized, concatenated, and + * compressed in order to reduce the memory footprint by about 4x when compared with a list of bare objects. This can be GiBs in + * size if we're deleting snapshots from a large repository, especially if earlier failures left behind lots of dangling blobs + * for some reason. * </p> * <p> - * Writes to this list are all synchronized (via {@link #addShardDeleteResult}), and happen-before it is read so the reads need - * no further synchronization + * Writes to this object are all synchronized (via {@link #addShardDeleteResult}), and happen-before it is read, so the reads + * need no further synchronization. * </p>
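Below, shardDeleteResults becomes a deflate-compressed byte stream rather than a list of objects; this sketch shows the same serialize-compress-then-inflate pattern using only the JDK (class and method names are illustrative, not the Elasticsearch types):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

class CompressedResultsSketch {
    private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    private final DataOutputStream out = new DataOutputStream(new DeflaterOutputStream(buffer));
    private int count;

    // Serialize each record straight into the deflate stream instead of
    // accumulating a List of bare objects on the heap.
    void add(String record) throws IOException {
        out.writeUTF(record);
        count++;
    }

    // Finish the deflate stream, then decode records lazily while iterating.
    // Single-use by design, like getBlobPaths(): call once, after all writes.
    Iterator<String> records() throws IOException {
        out.close();
        DataInputStream in = new DataInputStream(
            new InflaterInputStream(new ByteArrayInputStream(buffer.toByteArray()))
        );
        return new Iterator<String>() {
            int remaining = count;

            @Override
            public boolean hasNext() {
                return remaining > 0;
            }

            @Override
            public String next() {
                if (remaining == 0) {
                    throw new NoSuchElementException();
                }
                try {
                    remaining--;
                    return in.readUTF();
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }
        };
    }
}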
*/ - private final List shardDeleteResults = new ArrayList<>(); + // If the size of this continues to be a problem even after compression, consider either a hard limit on its size (preferring leaked + // blobs over an OOME on the master) or else offloading it to disk or to the repository itself. + private final BytesStreamOutput shardDeleteResults = new ReleasableBytesStreamOutput(bigArrays); + + private int resultCount = 0; + + private final StreamOutput compressed = new OutputStreamStreamOutput( + new BufferedOutputStream( + new DeflaterOutputStream(Streams.flushOnCloseStream(shardDeleteResults)), + DeflateCompressor.BUFFER_SIZE + ) + ); + + private final ArrayList resources = new ArrayList<>(); + + private final ShardGenerations.Builder shardGenerationsBuilder = ShardGenerations.builder(); + + ShardBlobsToDelete() { + resources.add(compressed); + resources.add(LeakTracker.wrap((Releasable) shardDeleteResults)); + } synchronized void addShardDeleteResult( IndexId indexId, @@ -1547,23 +1618,62 @@ synchronized void addShardDeleteResult( ShardGeneration newGeneration, Collection blobsToDelete ) { - shardDeleteResults.add(new ShardSnapshotMetaDeleteResult(indexId, shardId, newGeneration, blobsToDelete)); + try { + shardGenerationsBuilder.put(indexId, shardId, newGeneration); + new ShardSnapshotMetaDeleteResult(Objects.requireNonNull(indexId.getId()), shardId, blobsToDelete).writeTo(compressed); + resultCount += 1; + } catch (IOException e) { + assert false : e; // no IO actually happens here + throw new UncheckedIOException(e); + } } public ShardGenerations getUpdatedShardGenerations() { - final var builder = ShardGenerations.builder(); - for (var shardResult : shardDeleteResults) { - builder.put(shardResult.indexId, shardResult.shardId, shardResult.newGeneration); - } - return builder.build(); + return shardGenerationsBuilder.build(); } public Iterator getBlobPaths() { - return Iterators.flatMap(shardDeleteResults.iterator(), shardResult -> { - final var shardPath = shardPath(shardResult.indexId, shardResult.shardId).buildAsString(); + final StreamInput input; + try { + compressed.close(); + input = new InputStreamStreamInput( + new BufferedInputStream( + new InflaterInputStream(shardDeleteResults.bytes().streamInput()), + DeflateCompressor.BUFFER_SIZE + ) + ); + resources.add(input); + } catch (IOException e) { + assert false : e; // no IO actually happens here + throw new UncheckedIOException(e); + } + + return Iterators.flatMap(Iterators.forRange(0, resultCount, i -> { + try { + return new ShardSnapshotMetaDeleteResult(input); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }), shardResult -> { + final var shardPath = shardPath(new IndexId("_na_", shardResult.indexId), shardResult.shardId).buildAsString(); return Iterators.map(shardResult.blobsToDelete.iterator(), blob -> shardPath + blob); }); } + + @Override + public void close() { + try { + IOUtils.close(resources); + } catch (IOException e) { + assert false : e; // no IO actually happens here + throw new UncheckedIOException(e); + } + } + + // exposed for tests + int sizeInBytes() { + return shardDeleteResults.size(); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java b/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java index d8852155f8d77..92cdf57102f42 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java +++ 
b/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -121,4 +122,9 @@ default void validate(MasterNodeRequest request) { * @throws IOException */ T fromXContent(XContentParser parser) throws IOException; + + /** + * Reserved-state handlers create master-node requests but never actually send them to the master node so the timeouts are not relevant. + */ + TimeValue DUMMY_TIMEOUT = TimeValue.THIRTY_SECONDS; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java index d2c6626cb35c1..ac9748251953c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java @@ -41,10 +41,11 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String name = request.param("repository"); - CleanupRepositoryRequest cleanupRepositoryRequest = new CleanupRepositoryRequest(name); - cleanupRepositoryRequest.ackTimeout(getAckTimeout(request)); - cleanupRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var cleanupRepositoryRequest = new CleanupRepositoryRequest( + getMasterNodeTimeout(request), + getAckTimeout(request), + request.param("repository") + ); return channel -> client.admin().cluster().cleanupRepository(cleanupRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java index b6b63a6774667..713c3243f5ec8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java @@ -47,12 +47,12 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC try (var parser = request.contentParser()) { final Map source = parser.map(); final CloneSnapshotRequest cloneSnapshotRequest = new CloneSnapshotRequest( + getMasterNodeTimeout(request), request.param("repository"), request.param("snapshot"), request.param("target_snapshot"), XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) ); - cloneSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java index 9491ecfcc1115..8f4e42e210be8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java @@ -43,9 +43,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String repository = request.param("repository"); String snapshot = request.param("snapshot"); - CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot); + final var createSnapshotRequest = new CreateSnapshotRequest(getMasterNodeTimeout(request), repository, snapshot); request.applyContentParser(p -> createSnapshotRequest.source(p.mapOrdered())); - createSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); return channel -> client.admin().cluster().createSnapshot(createSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java index 067a40e293ff8..26e9cd101a53d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java @@ -45,9 +45,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String name = request.param("repository"); - DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest(name); - deleteRepositoryRequest.ackTimeout(getAckTimeout(request)); - deleteRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var deleteRepositoryRequest = new DeleteRepositoryRequest(getMasterNodeTimeout(request), getAckTimeout(request), name); return channel -> client.admin() .cluster() .deleteRepository( diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java index 37870c44fe256..74c258fd8b402 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java @@ -61,8 +61,7 @@ public Set<String> supportedQueryParameters() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String repository = request.param("repository"); String[] snapshots = Strings.splitStringByCommaToArray(request.param("snapshot")); - DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(repository, snapshots); - deleteSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var deleteSnapshotRequest = new DeleteSnapshotRequest(getMasterNodeTimeout(request), repository, snapshots); deleteSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", deleteSnapshotRequest.waitForCompletion())); return channel -> client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java index 
c2d4484f1e098..4c15b6514c7bd 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java @@ -51,8 +51,7 @@ public List<Route> routes() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); - GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(repositories); - getRepositoriesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var getRepositoriesRequest = new GetRepositoriesRequest(getMasterNodeTimeout(request), repositories); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); settingsFilter.addFilterSettingParams(request); return channel -> client.admin().cluster().getRepositories(getRepositoriesRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 45913b9b3ce2a..2b3ef6581e14f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -58,7 +58,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY); - GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(repositories).snapshots(snapshots); + final var getSnapshotsRequest = new GetSnapshotsRequest(getMasterNodeTimeout(request), repositories).snapshots(snapshots); getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); getSnapshotsRequest.verbose(request.paramAsBoolean("verbose", getSnapshotsRequest.verbose())); final SnapshotSortKey sort = SnapshotSortKey.of(request.param("sort", getSnapshotsRequest.sort().toString())); @@ -81,7 +81,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final SortOrder order = SortOrder.fromString(request.param("order", getSnapshotsRequest.order().toString())); getSnapshotsRequest.order(order); getSnapshotsRequest.includeIndexNames(request.paramAsBoolean(INDEX_NAMES_XCONTENT_PARAM, getSnapshotsRequest.includeIndexNames())); - getSnapshotsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() .getSnapshots(getSnapshotsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java index b25e394185877..51c4f0a6c9e13 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java @@ -47,13 +47,11 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws 
IOException { String name = request.param("repository"); - PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(name); + final var putRepositoryRequest = new PutRepositoryRequest(getMasterNodeTimeout(request), getAckTimeout(request), name); try (XContentParser parser = request.contentParser()) { putRepositoryRequest.source(parser.mapOrdered()); } putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); - putRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - putRepositoryRequest.ackTimeout(getAckTimeout(request)); return channel -> client.admin() .cluster() .putRepository( diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestResetFeatureStateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestResetFeatureStateAction.java index dcf6a1d165e7a..21a8349770a45 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestResetFeatureStateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestResetFeatureStateAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; @@ -43,8 +44,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final ResetFeatureStateRequest req = new ResetFeatureStateRequest(); - + final var req = new ResetFeatureStateRequest(RestUtils.getMasterNodeTimeout(request)); return restChannel -> client.execute(ResetFeatureStateAction.INSTANCE, req, new RestToXContentListener<>(restChannel, r -> { long failures = r.getFeatureStateResetStatuses() .stream() diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java index 06524a040db36..eeae14d230ca4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java @@ -40,10 +40,11 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String repository = request.param("repository"); - String snapshot = request.param("snapshot"); - RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repository, snapshot); - restoreSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var restoreSnapshotRequest = new RestoreSnapshotRequest( + getMasterNodeTimeout(request), + request.param("repository"), + request.param("snapshot") + ); restoreSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); request.applyContentParser(p -> restoreSnapshotRequest.source(p.mapOrdered())); return channel -> client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index 33b4ba04b826e..f8cb2c6086978 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -46,15 +46,13 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String repository = request.param("repository", "_all"); String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY); if (snapshots.length == 1 && "_all".equalsIgnoreCase(snapshots[0])) { snapshots = Strings.EMPTY_ARRAY; } - SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository).snapshots(snapshots); + final var snapshotsStatusRequest = new SnapshotsStatusRequest(getMasterNodeTimeout(request), request.param("repository", "_all")); + snapshotsStatusRequest.snapshots(snapshots); snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable())); - - snapshotsStatusRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() .snapshotsStatus(snapshotsStatusRequest, new RestRefCountedChunkedToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java index b36c4ac56ae71..850975672e25f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java @@ -37,9 +37,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final GetSnapshottableFeaturesRequest req = new GetSnapshottableFeaturesRequest(); - req.masterNodeTimeout(getMasterNodeTimeout(request)); - - return restChannel -> { client.execute(SnapshottableFeaturesAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; + final var req = new GetSnapshottableFeaturesRequest(getMasterNodeTimeout(request)); + return restChannel -> client.execute(SnapshottableFeaturesAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java index 9880268f617db..9477895f0f758 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java @@ -38,10 +38,11 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String name = request.param("repository"); - VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(name); - verifyRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - verifyRepositoryRequest.ackTimeout(getAckTimeout(request)); + final var verifyRepositoryRequest = new VerifyRepositoryRequest( + getMasterNodeTimeout(request), + getAckTimeout(request), + request.param("repository") + ); return channel -> 
client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java index 5744923b86d6c..6eacafef2795e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -37,10 +38,8 @@ public List<Route> routes() { @Override protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { - GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); + final var getRepositoriesRequest = new GetRepositoriesRequest(getMasterNodeTimeout(request), Strings.EMPTY_ARRAY); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); - getRepositoriesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - return channel -> client.admin() .cluster() .getRepositories(getRepositoriesRequest, new RestResponseListener<GetRepositoriesResponse>(channel) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java index 0ff44e37698d9..d6fc9efc183f6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java @@ -49,16 +49,18 @@ public String getName() { return "cat_snapshot_action"; } + private static final String[] MATCH_ALL_PATTERNS = { ResolvedRepositories.ALL_PATTERN }; + @Override protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient client) { - final String[] matchAll = { ResolvedRepositories.ALL_PATTERN }; - GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest().repositories(request.paramAsStringArray("repository", matchAll)) - .snapshots(matchAll); + final var getSnapshotsRequest = new GetSnapshotsRequest( + getMasterNodeTimeout(request), + request.paramAsStringArray("repository", MATCH_ALL_PATTERNS), + MATCH_ALL_PATTERNS + ); getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); - getSnapshotsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - return channel -> client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(GetSnapshotsResponse getSnapshotsResponse) throws Exception {
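Reviewer note (not part of the diff): the new file below adds DenseVectorQuery, an exact kNN query that iterates and scores every document holding a vector in the field, in contrast to the approximate HNSW-based KnnFloatVectorQuery. A usage sketch against a plain Lucene index; the ExactKnnDemo class and its field names are illustrative only:

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.document.KnnFloatVectorField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.VectorSimilarityFunction;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.elasticsearch.search.vectors.DenseVectorQuery;

public class ExactKnnDemo {
    public static void main(String[] args) throws Exception {
        try (var dir = new ByteBuffersDirectory()) {
            try (var writer = new IndexWriter(dir, new IndexWriterConfig())) {
                var doc = new Document();
                doc.add(new KnnFloatVectorField("emb", new float[] { 1f, 0f }, VectorSimilarityFunction.COSINE));
                writer.addDocument(doc);
            }
            try (var reader = DirectoryReader.open(dir)) {
                var searcher = new IndexSearcher(reader);
                // Exact scoring: every document with a vector in "emb" is visited and scored.
                TopDocs top = searcher.search(new DenseVectorQuery.Floats(new float[] { 1f, 0f }, "emb"), 10);
                System.out.println("hits: " + top.totalHits);
            }
        }
    }
}
```

diff --git a/server/src/main/java/org/elasticsearch/search/vectors/DenseVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/DenseVectorQuery.java new file mode 100644 index 0000000000000..8fd59a0e6f224 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/vectors/DenseVectorQuery.java @@ -0,0 +1,209 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 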
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.vectors; + +import org.apache.lucene.index.ByteVectorValues; +import org.apache.lucene.index.FloatVectorValues; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.VectorScorer; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Exact knn query. Will iterate and score all documents that have the provided dense vector field in the index. + */ +public abstract class DenseVectorQuery extends Query { + + protected final String field; + + public DenseVectorQuery(String field) { + this.field = field; + } + + @Override + public void visit(QueryVisitor queryVisitor) { + queryVisitor.visitLeaf(this); + } + + abstract static class DenseVectorWeight extends Weight { + private final String field; + private final float boost; + + protected DenseVectorWeight(DenseVectorQuery query, float boost) { + super(query); + this.field = query.field; + this.boost = boost; + } + + abstract VectorScorer vectorScorer(LeafReaderContext leafReaderContext) throws IOException; + + @Override + public Explanation explain(LeafReaderContext leafReaderContext, int i) throws IOException { + VectorScorer vectorScorer = vectorScorer(leafReaderContext); + if (vectorScorer == null) { + return Explanation.noMatch("No vector values found for field: " + field); + } + DocIdSetIterator iterator = vectorScorer.iterator(); + iterator.advance(i); + if (iterator.docID() == i) { + float score = vectorScorer.score(); + return Explanation.match(vectorScorer.score() * boost, "found vector with calculated similarity: " + score); + } + return Explanation.noMatch("Document not found in vector values for field: " + field); + } + + @Override + public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException { + VectorScorer vectorScorer = vectorScorer(leafReaderContext); + if (vectorScorer == null) { + return null; + } + return new DenseVectorScorer(this, vectorScorer); + } + + @Override + public boolean isCacheable(LeafReaderContext leafReaderContext) { + return true; + } + } + + public static class Floats extends DenseVectorQuery { + + private final float[] query; + + public Floats(float[] query, String field) { + super(field); + this.query = query; + } + + public float[] getQuery() { + return query; + } + + @Override + public String toString(String field) { + return "DenseVectorQuery.Floats"; + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return new DenseVectorWeight(Floats.this, boost) { + @Override + VectorScorer vectorScorer(LeafReaderContext leafReaderContext) throws IOException { + FloatVectorValues vectorValues = leafReaderContext.reader().getFloatVectorValues(field); + if (vectorValues == null) { + return null; + } + return vectorValues.scorer(query); + } + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null 
|| getClass() != o.getClass()) return false; + Floats floats = (Floats) o; + return Objects.equals(field, floats.field) && Objects.deepEquals(query, floats.query); + } + + @Override + public int hashCode() { + return Objects.hash(field, Arrays.hashCode(query)); + } + } + + public static class Bytes extends DenseVectorQuery { + + private final byte[] query; + + public Bytes(byte[] query, String field) { + super(field); + this.query = query; + } + + @Override + public String toString(String field) { + return "DenseVectorQuery.Bytes"; + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return new DenseVectorWeight(Bytes.this, boost) { + @Override + VectorScorer vectorScorer(LeafReaderContext leafReaderContext) throws IOException { + ByteVectorValues vectorValues = leafReaderContext.reader().getByteVectorValues(field); + if (vectorValues == null) { + return null; + } + return vectorValues.scorer(query); + } + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Bytes bytes = (Bytes) o; + return Objects.equals(field, bytes.field) && Objects.deepEquals(query, bytes.query); + } + + @Override + public int hashCode() { + return Objects.hash(field, Arrays.hashCode(query)); + } + } + + static class DenseVectorScorer extends Scorer { + + private final VectorScorer vectorScorer; + private final DocIdSetIterator iterator; + private final float boost; + + DenseVectorScorer(DenseVectorWeight weight, VectorScorer vectorScorer) { + super(weight); + this.vectorScorer = vectorScorer; + this.iterator = vectorScorer.iterator(); + this.boost = weight.boost; + } + + @Override + public DocIdSetIterator iterator() { + return vectorScorer.iterator(); + } + + @Override + public float getMaxScore(int i) throws IOException { + // TODO: can we optimize this at all? 
+ return Float.POSITIVE_INFINITY; + } + + @Override + public float score() throws IOException { + assert iterator.docID() != -1; + return vectorScorer.score() * boost; + } + + @Override + public int docID() { + return iterator.docID(); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java index f02d551fd9a44..168eac6d60245 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java @@ -27,7 +27,7 @@ public class PutRepositoryRequestTests extends ESTestCase { @SuppressWarnings("unchecked") public void testCreateRepositoryToXContent() throws IOException { Map<String, String> mapParams = new HashMap<>(); - PutRepositoryRequest request = new PutRepositoryRequest(); + PutRepositoryRequest request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); String repoName = "test"; request.name(repoName); mapParams.put("name", repoName); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java index 794be6e463548..2886ca7be4821 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java @@ -38,7 +38,7 @@ public void testToXContent() throws IOException { String repo = randomAlphaOfLength(5); String snap = randomAlphaOfLength(10); - CreateSnapshotRequest original = new CreateSnapshotRequest(repo, snap); + CreateSnapshotRequest original = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, repo, snap); if (randomBoolean()) { List<String> indices = new ArrayList<>(); @@ -106,7 +106,11 @@ public void testToXContent() throws IOException { .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()) ) { Map<String, Object> map = parser.mapOrdered(); - CreateSnapshotRequest processed = new CreateSnapshotRequest((String) map.get("repository"), (String) map.get("snapshot")); + CreateSnapshotRequest processed = new CreateSnapshotRequest( + TEST_REQUEST_TIMEOUT, + (String) map.get("repository"), + (String) map.get("snapshot") + ); processed.waitForCompletion(original.waitForCompletion()); processed.masterNodeTimeout(original.masterNodeTimeout()); processed.source(map); @@ -162,7 +166,8 @@ public void testSizeCheck() { } private CreateSnapshotRequest createSnapshotRequestWithMetadata(Map<String, Object> metadata) { - return new CreateSnapshotRequest(randomAlphaOfLength(5), randomAlphaOfLength(5)).indices(randomAlphaOfLength(5)) - .userMetadata(metadata); + return new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, randomAlphaOfLength(5), randomAlphaOfLength(5)).indices( + randomAlphaOfLength(5) + ).userMetadata(metadata); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java index 810d297602e8a..b8e958169fc97 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java @@ -18,62 +18,70 @@ public class GetSnapshotsRequestTests extends ESTestCase { public void testValidateParameters() { { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot"); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot"); assertNull(request.validate()); request.size(0); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("size must be -1 or greater than 0")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").size(randomIntBetween(1, 500)); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").size( + randomIntBetween(1, 500) + ); assertNull(request.validate()); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).size(randomIntBetween(1, 500)); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .size(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use size limit with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).offset(randomIntBetween(1, 500)); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .offset(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use offset with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).sort(SnapshotSortKey.INDICES); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .sort(SnapshotSortKey.INDICES); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use non-default sort with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).order(SortOrder.DESC); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .order(SortOrder.DESC); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use non-default sort order with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false) + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).fromSortValue("bar"); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .fromSortValue("bar"); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use from_sort_value with verbose=false")); } { - final GetSnapshotsRequest request = new 
GetSnapshotsRequest("repo", "snapshot").after( + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").after( new SnapshotSortKey.After("foo", "repo", "bar") ).offset(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and offset simultaneously")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").fromSortValue("foo") + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").fromSortValue("foo") .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and from_sort_value simultaneously")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").policies("some-policy").verbose(false); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").policies("some-policy") + .verbose(false); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use slm policy filter with verbose=false")); } @@ -81,6 +89,7 @@ public void testValidateParameters() { public void testGetDescription() { final GetSnapshotsRequest request = new GetSnapshotsRequest( + TEST_REQUEST_TIMEOUT, new String[] { "repo1", "repo2" }, new String[] { "snapshotA", "snapshotB" } ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequestSerializationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequestSerializationTests.java index fc75fb6650b16..1496609f9bc1f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequestSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequestSerializationTests.java @@ -30,10 +30,10 @@ protected Writeable.Reader<GetShardSnapshotRequest> instanceReader() { protected GetShardSnapshotRequest createTestInstance() { ShardId shardId = randomShardId(); if (randomBoolean()) { - return GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); + return GetShardSnapshotRequest.latestSnapshotInAllRepositories(TEST_REQUEST_TIMEOUT, shardId); } else { List<String> repositories = randomList(1, randomIntBetween(1, 100), () -> randomAlphaOfLength(randomIntBetween(1, 100))); - return GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, repositories); + return GetShardSnapshotRequest.latestSnapshotInRepositories(TEST_REQUEST_TIMEOUT, shardId, repositories); } } @@ -41,9 +41,9 @@ protected GetShardSnapshotRequest createTestInstance() { protected GetShardSnapshotRequest mutateInstance(GetShardSnapshotRequest instance) { ShardId shardId = randomShardId(); if (instance.getFromAllRepositories()) { - return GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); + return GetShardSnapshotRequest.latestSnapshotInAllRepositories(TEST_REQUEST_TIMEOUT, shardId); } else { - return GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, instance.getRepositories()); + return GetShardSnapshotRequest.latestSnapshotInRepositories(TEST_REQUEST_TIMEOUT, shardId, instance.getRepositories()); } } @@ -52,7 +52,11 @@ private ShardId randomShardId() { } public void testGetDescription() { - final GetShardSnapshotRequest 
request = new GetShardSnapshotRequest(Arrays.asList("repo1", "repo2"), new ShardId("idx", "uuid", 0)); + final GetShardSnapshotRequest request = new GetShardSnapshotRequest( + TEST_REQUEST_TIMEOUT, + Arrays.asList("repo1", "repo2"), + new ShardId("idx", "uuid", 0) + ); assertThat(request.getDescription(), equalTo("shard[idx][0], repositories[repo1,repo2]")); final GetShardSnapshotRequest randomRequest = createTestInstance(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java index 45c0a5990f117..53bbfb775f631 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java @@ -107,7 +107,7 @@ private RestoreSnapshotRequest randomState(RestoreSnapshotRequest instance) { @Override protected RestoreSnapshotRequest createTestInstance() { - return randomState(new RestoreSnapshotRequest(randomAlphaOfLength(5), randomAlphaOfLength(10))); + return randomState(new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, randomAlphaOfLength(5), randomAlphaOfLength(10))); } @Override @@ -139,7 +139,7 @@ public void testSource() throws IOException { // we will only restore properties from the map that are contained in the request body. All other // properties are restored from the original (in the actual REST action this is restored from the // REST path and request parameters). - RestoreSnapshotRequest processed = new RestoreSnapshotRequest(original.repository(), original.snapshot()); + RestoreSnapshotRequest processed = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, original.repository(), original.snapshot()); processed.masterNodeTimeout(original.masterNodeTimeout()); processed.waitForCompletion(original.waitForCompletion()); diff --git a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java index 652e7f014b8ef..39e424adecfce 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java @@ -117,7 +117,7 @@ public void testActions() { .execute(new AssertingActionListener<>(TransportClusterStatsAction.TYPE.name(), client.threadPool())); client.admin() .cluster() - .prepareCreateSnapshot("repo", "bck") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "bck") .execute(new AssertingActionListener<>(TransportCreateSnapshotAction.TYPE.name(), client.threadPool())); client.execute( TransportClusterRerouteAction.TYPE, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java index 7e338c52a0a17..922a1405bddff 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java @@ -191,6 +191,48 @@ public void testValidateLifecycleIndexTemplateWithWarning() { ); } + /** + * Make sure we still take into account component templates during validation (and not just the index 
template). + */ + public void testValidateLifecycleComponentTemplateWithWarning() { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + HeaderWarning.setThreadContext(threadContext); + TimeValue defaultRetention = randomTimeValue(2, 100, TimeUnit.DAYS); + MetadataIndexTemplateService.validateLifecycle( + Metadata.builder() + .componentTemplates( + Map.of( + "component-template", + new ComponentTemplate( + new Template( + null, + null, + null, + new DataStreamLifecycle( + new DataStreamLifecycle.Retention(randomTimeValue(2, 100, TimeUnit.DAYS)), + null, + null + ) + ), + null, + null + ) + ) + ) + .build(), + randomAlphaOfLength(10), + ComposableIndexTemplate.builder() + .template(new Template(null, null, null, DataStreamLifecycle.DEFAULT)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .indexPatterns(List.of(randomAlphaOfLength(10))) + .componentTemplates(List.of("component-template")) + .build(), + new DataStreamGlobalRetention(defaultRetention, null) + ); + Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders(); + assertThat(responseHeaders.size(), is(0)); + } + public void testValidateLifecycleInComponentTemplate() throws Exception { IndicesService indicesService = mock(IndicesService.class); IndexService indexService = mock(IndexService.class); diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java index a3e11c0645e32..5b50eb63e1489 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.common.xcontent.support; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -421,25 +420,4 @@ public void testParseToType() throws IOException { assertThat(names, equalTo(Set.of("a", "c"))); } - - public void testDrainAndClose() throws IOException { - String json = """ - { "a": "b", "c": "d", "e": {"f": "g"}, "h": ["i", "j", {"k": "l"}]}"""; - var parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json); - var content = XContentBuilder.builder(XContentType.JSON.xContent()); - XContentHelper.drainAndClose(parser, content); - - assertEquals(json.replace(" ", ""), Strings.toString(content)); - assertTrue(parser.isClosed()); - } - - public void testDrainAndCloseAlreadyClosed() throws IOException { - var parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, "{}"); - parser.close(); - - assertThrows( - IllegalStateException.class, - () -> XContentHelper.drainAndClose(parser, XContentBuilder.builder(XContentType.JSON.xContent())) - ); - } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index 886b0aa9e425d..d6b675ed0eb51 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -159,7 +159,7 @@ public void testFieldAliasWithDifferentNestedScopes() { private static FieldMapper createFieldMapper(String parent, String name) { return new 
BooleanFieldMapper.Builder(name, ScriptCompiler.NONE, false, IndexVersion.current()).build( - new MapperBuilderContext(parent) + new MapperBuilderContext(parent, false, false, false, ObjectMapper.Defaults.DYNAMIC, MapperService.MergeReason.MAPPING_UPDATE) ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index e7f8a16c5cc10..fc30b9b6677f1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -11,16 +11,19 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.xcontent.XContentBuilder; +import org.hamcrest.Matchers; import java.io.IOException; import java.math.BigInteger; import java.util.Base64; +import java.util.List; import java.util.Locale; +import java.util.Map; public class IgnoredSourceFieldMapperTests extends MapperServiceTestCase { - private String getSyntheticSourceWithFieldLimit(CheckedConsumer<XContentBuilder, IOException> build) throws IOException { - DocumentMapper documentMapper = createMapperService( + private DocumentMapper getDocumentMapperWithFieldLimit() throws IOException { + return createMapperService( Settings.builder() .put("index.mapping.total_fields.limit", 2) .put("index.mapping.total_fields.ignore_dynamic_beyond_limit", true) @@ -30,6 +33,15 @@ private String getSyntheticSourceWithFieldLimit(CheckedConsumer<XContentBuilder, IOException> build) throws IOException { + private ParsedDocument getParsedDocumentWithFieldLimit(CheckedConsumer<XContentBuilder, IOException> build) throws IOException { + DocumentMapper mapper = getDocumentMapperWithFieldLimit(); + return mapper.parse(source(build)); + } + + private String getSyntheticSourceWithFieldLimit(CheckedConsumer<XContentBuilder, IOException> build) throws IOException { + DocumentMapper documentMapper = getDocumentMapperWithFieldLimit(); return syntheticSource(documentMapper, build); } @@ -78,7 +90,54 @@ public void testIgnoredBytes() throws IOException { public void testIgnoredObjectBoolean() throws IOException { boolean value = randomBoolean(); - assertEquals("{\"my_value\":" + value + "}", getSyntheticSourceWithFieldLimit(b -> b.field("my_value", value))); + assertEquals("{\"my_object\":{\"my_value\":" + value + "}}", getSyntheticSourceWithFieldLimit(b -> { + b.startObject("my_object").field("my_value", value).endObject(); + })); + } + + public void testIgnoredArray() throws IOException { + assertEquals("{\"my_array\":[{\"int_value\":10},{\"int_value\":20}]}", getSyntheticSourceWithFieldLimit(b -> { + b.startArray("my_array"); + b.startObject().field("int_value", 10).endObject(); + b.startObject().field("int_value", 20).endObject(); + b.endArray(); + })); + } + + public void testEncodeFieldToMap() throws IOException { + String value = randomAlphaOfLength(5); + ParsedDocument parsedDocument = getParsedDocumentWithFieldLimit(b -> b.field("my_value", value)); + byte[] bytes = parsedDocument.rootDoc().getField(IgnoredSourceFieldMapper.NAME).binaryValue().bytes; + IgnoredSourceFieldMapper.MappedNameValue mappedNameValue = IgnoredSourceFieldMapper.decodeAsMap(bytes); + assertEquals("my_value", mappedNameValue.nameValue().name()); + assertEquals(value, mappedNameValue.map().get("my_value")); + } + + @SuppressWarnings("unchecked") + public void testEncodeObjectToMapAndDecode() throws IOException { + String value = randomAlphaOfLength(5); + ParsedDocument parsedDocument = getParsedDocumentWithFieldLimit( + b -> { b.startObject("my_object").field("my_value", value).endObject(); } + ); + 
byte[] bytes = parsedDocument.rootDoc().getField(IgnoredSourceFieldMapper.NAME).binaryValue().bytes; + IgnoredSourceFieldMapper.MappedNameValue mappedNameValue = IgnoredSourceFieldMapper.decodeAsMap(bytes); + assertEquals("my_object", mappedNameValue.nameValue().name()); + assertEquals(value, ((Map<String, Object>) mappedNameValue.map().get("my_object")).get("my_value")); + assertArrayEquals(bytes, IgnoredSourceFieldMapper.encodeFromMap(mappedNameValue, mappedNameValue.map())); + } + + public void testEncodeArrayToMapAndDecode() throws IOException { + ParsedDocument parsedDocument = getParsedDocumentWithFieldLimit(b -> { + b.startArray("my_array"); + b.startObject().field("int_value", 10).endObject(); + b.startObject().field("int_value", 20).endObject(); + b.endArray(); + }); + byte[] bytes = parsedDocument.rootDoc().getField(IgnoredSourceFieldMapper.NAME).binaryValue().bytes; + IgnoredSourceFieldMapper.MappedNameValue mappedNameValue = IgnoredSourceFieldMapper.decodeAsMap(bytes); + assertEquals("my_array", mappedNameValue.nameValue().name()); + assertThat((List<?>) mappedNameValue.map().get("my_array"), Matchers.contains(Map.of("int_value", 10), Map.of("int_value", 20))); + assertArrayEquals(bytes, IgnoredSourceFieldMapper.encodeFromMap(mappedNameValue, mappedNameValue.map())); } public void testMultipleIgnoredFieldsRootObject() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperMergeContextTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperMergeContextTests.java index 77d3259ea1091..3cea8a3403307 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperMergeContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperMergeContextTests.java @@ -10,21 +10,23 @@ import org.elasticsearch.test.ESTestCase; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; + public class MapperMergeContextTests extends ESTestCase { public void testAddFieldIfPossibleUnderLimit() { - MapperMergeContext context = MapperMergeContext.root(false, false, 1); + MapperMergeContext context = MapperMergeContext.root(false, false, MAPPING_UPDATE, 1); assertTrue(context.decrementFieldBudgetIfPossible(1)); assertFalse(context.decrementFieldBudgetIfPossible(1)); } public void testAddFieldIfPossibleAtLimit() { - MapperMergeContext context = MapperMergeContext.root(false, false, 0); + MapperMergeContext context = MapperMergeContext.root(false, false, MAPPING_UPDATE, 0); assertFalse(context.decrementFieldBudgetIfPossible(1)); } public void testAddFieldIfPossibleUnlimited() { - MapperMergeContext context = MapperMergeContext.root(false, false, Long.MAX_VALUE); + MapperMergeContext context = MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE); assertTrue(context.decrementFieldBudgetIfPossible(Integer.MAX_VALUE)); assertTrue(context.decrementFieldBudgetIfPossible(Integer.MAX_VALUE)); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java index 01cbe496e6a3d..1ab1d881d76b3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java @@ -16,6 +16,8 @@ import java.util.Map; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; + public class MultiFieldsTests extends ESTestCase { public void testMultiFieldsBuilderHasSyntheticSourceCompatibleKeywordField() { @@ 
-45,7 +47,11 @@ public void testMultiFieldsBuilderHasSyntheticSourceCompatibleKeywordFieldDuring keywordFieldMapperBuilder ).build(MapperBuilderContext.root(false, false)); - builder.merge(newField, new FieldMapper.Conflicts("TextFieldMapper"), MapperMergeContext.root(false, false, Long.MAX_VALUE)); + builder.merge( + newField, + new FieldMapper.Conflicts("TextFieldMapper"), + MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE) + ); var expected = hasNormalizer == false; assertEquals(expected, builder.multiFieldsBuilder.hasSyntheticSourceCompatibleKeywordField()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index c767429d4c0fb..289f12d1508f9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1511,7 +1511,7 @@ public void testMergeNested() { MapperException e = expectThrows( MapperException.class, - () -> firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, MergeReason.MAPPING_UPDATE, Long.MAX_VALUE)) ); assertThat(e.getMessage(), containsString("[include_in_parent] parameter can't be updated on a nested object mapping")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 94a4c2ea92fbb..25ef3c8550ec0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -13,6 +13,9 @@ import java.util.Collections; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.INDEX_TEMPLATE; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; + public final class ObjectMapperMergeTests extends ESTestCase { private final RootObjectMapper rootObjectMapper = createMapping(false, true, true, false); @@ -41,7 +44,10 @@ public void testMerge() { ObjectMapper mergeWith = createMapping(false, true, true, true); // WHEN merging mappings - final ObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = rootObjectMapper.merge( + mergeWith, + MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE) + ); // THEN "baz" new field is added to merged mapping final ObjectMapper mergedFoo = (ObjectMapper) merged.getMapper("foo"); @@ -63,7 +69,7 @@ public void testMergeWhenDisablingField() { // THEN a MapperException is thrown with an excepted message MapperException e = expectThrows( MapperException.class, - () -> rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals("the [enabled] parameter can't be updated for the object mapping [foo]", e.getMessage()); } @@ -75,7 +81,7 @@ public void testMergeDisabledField() { new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + RootObjectMapper merged = 
rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertFalse(((ObjectMapper) merged.getMapper("disabled")).isEnabled()); } @@ -84,14 +90,11 @@ public void testMergeEnabled() { MapperException e = expectThrows( MapperException.class, - () -> rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals("the [enabled] parameter can't be updated for the object mapping [disabled]", e.getMessage()); - ObjectMapper result = rootObjectMapper.merge( - mergeWith, - MapperMergeContext.root(false, false, MapperService.MergeReason.INDEX_TEMPLATE, Long.MAX_VALUE) - ); + ObjectMapper result = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, INDEX_TEMPLATE, Long.MAX_VALUE)); assertTrue(result.isEnabled()); } @@ -105,14 +108,11 @@ public void testMergeEnabledForRootMapper() { MapperException e = expectThrows( MapperException.class, - () -> firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals("the [enabled] parameter can't be updated for the object mapping [" + type + "]", e.getMessage()); - ObjectMapper result = firstMapper.merge( - secondMapper, - MapperMergeContext.root(false, false, MapperService.MergeReason.INDEX_TEMPLATE, Long.MAX_VALUE) - ); + ObjectMapper result = firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, INDEX_TEMPLATE, Long.MAX_VALUE)); assertFalse(result.isEnabled()); } @@ -126,7 +126,7 @@ public void testMergeDisabledRootMapper() { Collections.singletonMap("test", new TestRuntimeField("test", "long")) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertFalse(merged.isEnabled()); assertEquals(1, merged.runtimeFields().size()); assertEquals("test", merged.runtimeFields().iterator().next().name()); @@ -136,7 +136,7 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalseAtRoot() { RootObjectMapper mergeInto = createRootSubobjectFalseLeafWithDots(); RootObjectMapper mergeWith = createRootSubobjectFalseLeafWithDots(); - final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); final KeywordFieldMapper keywordFieldMapper = (KeywordFieldMapper) merged.getMapper("host.name"); assertEquals("host.name", keywordFieldMapper.name()); @@ -151,7 +151,7 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { createObjectSubobjectsFalseLeafWithDots() ).build(MapperBuilderContext.root(false, false)); - final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); ObjectMapper foo = (ObjectMapper) merged.getMapper("foo"); ObjectMapper metrics = (ObjectMapper) foo.getMapper("metrics"); @@ -166,7 +166,7 @@ public void testMergedFieldNamesMultiFields() { RootObjectMapper 
mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(createTextKeywordMultiField("text")) .build(MapperBuilderContext.root(false, false)); - final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); TextFieldMapper text = (TextFieldMapper) merged.getMapper("text"); assertEquals("text", text.name()); @@ -184,7 +184,7 @@ public void testMergedFieldNamesMultiFieldsWithinSubobjectsFalse() { createObjectSubobjectsFalseLeafWithMultiField() ).build(MapperBuilderContext.root(false, false)); - final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); ObjectMapper foo = (ObjectMapper) merged.getMapper("foo"); ObjectMapper metrics = (ObjectMapper) foo.getMapper("metrics"); @@ -201,8 +201,8 @@ public void testMergeWithLimit() { ObjectMapper mergeWith = createMapping(false, true, true, true); // WHEN merging mappings - final ObjectMapper mergedAdd0 = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - final ObjectMapper mergedAdd1 = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, 1)); + final ObjectMapper mergedAdd0 = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + final ObjectMapper mergedAdd1 = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); // THEN "baz" new field is added to merged mapping assertEquals(3, rootObjectMapper.getTotalFieldsCount()); @@ -219,10 +219,10 @@ public void testMergeWithLimitTruncatedObjectField() { ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); - ObjectMapper mergedAdd0 = root.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - ObjectMapper mergedAdd1 = root.merge(mergeWith, MapperMergeContext.root(false, false, 1)); - ObjectMapper mergedAdd2 = root.merge(mergeWith, MapperMergeContext.root(false, false, 2)); - ObjectMapper mergedAdd3 = root.merge(mergeWith, MapperMergeContext.root(false, false, 3)); + ObjectMapper mergedAdd0 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + ObjectMapper mergedAdd1 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); + ObjectMapper mergedAdd2 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 2)); + ObjectMapper mergedAdd3 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 3)); assertEquals(0, root.getTotalFieldsCount()); assertEquals(0, mergedAdd0.getTotalFieldsCount()); assertEquals(1, mergedAdd1.getTotalFieldsCount()); @@ -252,8 +252,8 @@ public void testMergeSameObjectDifferentFields() { ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); - ObjectMapper mergedAdd0 = root.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - ObjectMapper mergedAdd1 = root.merge(mergeWith, MapperMergeContext.root(false, false, 1)); + ObjectMapper mergedAdd0 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + ObjectMapper mergedAdd1 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); assertEquals(2, 
root.getTotalFieldsCount()); assertEquals(2, mergedAdd0.getTotalFieldsCount()); assertEquals(3, mergedAdd1.getTotalFieldsCount()); @@ -280,8 +280,8 @@ public void testMergeWithLimitMultiField() { assertEquals(2, mergeInto.getTotalFieldsCount()); assertEquals(2, mergeWith.getTotalFieldsCount()); - ObjectMapper mergedAdd0 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - ObjectMapper mergedAdd1 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, 1)); + ObjectMapper mergedAdd0 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + ObjectMapper mergedAdd1 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); assertEquals(2, mergedAdd0.getTotalFieldsCount()); assertEquals(3, mergedAdd1.getTotalFieldsCount()); } @@ -297,8 +297,8 @@ public void testMergeWithLimitRuntimeField() { assertEquals(3, mergeInto.getTotalFieldsCount()); assertEquals(2, mergeWith.getTotalFieldsCount()); - ObjectMapper mergedAdd0 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - ObjectMapper mergedAdd1 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, 1)); + ObjectMapper mergedAdd0 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + ObjectMapper mergedAdd1 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); assertEquals(3, mergedAdd0.getTotalFieldsCount()); assertEquals(4, mergedAdd1.getTotalFieldsCount()); } @@ -315,7 +315,7 @@ public void testMergeSubobjectsFalseWithObject() { ) ).build(MapperBuilderContext.root(false, false)); - ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); ObjectMapper parentMapper = (ObjectMapper) merged.getMapper("parent"); assertNotNull(parentMapper); assertNotNull(parentMapper.getMapper("child.grandchild")); @@ -332,7 +332,16 @@ private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { KeywordFieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.current()); - KeywordFieldMapper fieldMapper = fieldBuilder.build(new MapperBuilderContext("foo.metrics")); + KeywordFieldMapper fieldMapper = fieldBuilder.build( + new MapperBuilderContext( + "foo.metrics", + false, + false, + false, + ObjectMapper.Defaults.DYNAMIC, + MapperService.MergeReason.MAPPING_UPDATE + ) + ); assertEquals("host.name", fieldMapper.simpleName()); assertEquals("foo.metrics.host.name", fieldMapper.name()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( @@ -342,7 +351,16 @@ private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { private ObjectMapper.Builder createObjectSubobjectsFalseLeafWithMultiField() { TextFieldMapper.Builder fieldBuilder = createTextKeywordMultiField("host.name"); - TextFieldMapper textKeywordMultiField = fieldBuilder.build(new MapperBuilderContext("foo.metrics")); + TextFieldMapper textKeywordMultiField = fieldBuilder.build( + new MapperBuilderContext( + "foo.metrics", + false, + false, + false, + ObjectMapper.Defaults.DYNAMIC, + MapperService.MergeReason.MAPPING_UPDATE + ) + ); assertEquals("host.name", textKeywordMultiField.simpleName()); assertEquals("foo.metrics.host.name", textKeywordMultiField.name()); FieldMapper fieldMapper = 
textKeywordMultiField.multiFields.iterator().next(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java index 0ec1997ae652e..308f775ec7b28 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java @@ -40,6 +40,7 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.mock; @@ -348,7 +349,7 @@ public void testMerging() { {"type":"test_mapper","fixed":true,"fixed2":true,"required":"value"}"""); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> mapper.merge(badMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> mapper.merge(badMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); String expectedError = """ Mapper for [field] conflicts with existing mapper: @@ -361,7 +362,7 @@ public void testMerging() { // TODO: should we have to include 'fixed' here? Or should updates take as 'defaults' the existing values? TestMapper goodMerge = fromMapping(""" {"type":"test_mapper","fixed":false,"variable":"updated","required":"value"}"""); - TestMapper merged = (TestMapper) mapper.merge(goodMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + TestMapper merged = (TestMapper) mapper.merge(goodMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertEquals("{\"field\":" + mapping + "}", Strings.toString(mapper)); // original mapping is unaffected assertEquals(""" @@ -379,7 +380,7 @@ public void testMultifields() throws IOException { String addSubField = """ {"type":"test_mapper","variable":"foo","required":"value","fields":{"sub2":{"type":"keyword"}}}"""; TestMapper toMerge = fromMapping(addSubField); - TestMapper merged = (TestMapper) mapper.merge(toMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + TestMapper merged = (TestMapper) mapper.merge(toMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertEquals(XContentHelper.stripWhitespace(""" { "field": { @@ -402,7 +403,7 @@ public void testMultifields() throws IOException { TestMapper badToMerge = fromMapping(badSubField); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> merged.merge(badToMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> merged.merge(badToMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals("mapper [field.sub2] cannot be changed from type [keyword] to [binary]", e.getMessage()); } @@ -418,13 +419,16 @@ public void testCopyTo() { TestMapper toMerge = fromMapping(""" {"type":"test_mapper","variable":"updated","required":"value","copy_to":["foo","bar"]}"""); - TestMapper merged = (TestMapper) mapper.merge(toMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + TestMapper merged = (TestMapper) mapper.merge(toMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertEquals(""" {"field":{"type":"test_mapper","variable":"updated","required":"value","copy_to":["foo","bar"]}}""", Strings.toString(merged)); TestMapper removeCopyTo = fromMapping(""" 
{"type":"test_mapper","variable":"updated","required":"value"}"""); - TestMapper noCopyTo = (TestMapper) merged.merge(removeCopyTo, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + TestMapper noCopyTo = (TestMapper) merged.merge( + removeCopyTo, + MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE) + ); assertEquals(""" {"field":{"type":"test_mapper","variable":"updated","required":"value"}}""", Strings.toString(noCopyTo)); } @@ -473,7 +477,7 @@ public void testObjectSerialization() throws IOException { MapperService mapperService = createMapperService(mapping); assertEquals(mapping, Strings.toString(mapperService.documentMapper().mapping())); - mapperService.merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("_doc", new CompressedXContent(mapping), MAPPING_UPDATE); assertEquals(mapping, Strings.toString(mapperService.documentMapper().mapping())); } @@ -490,7 +494,7 @@ public void testCustomSerialization() { TestMapper toMerge = fromMapping(conflict); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> mapper.merge(toMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> mapper.merge(toMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals( "Mapper for [field] conflicts with existing mapper:\n" @@ -581,7 +585,7 @@ public void testAnalyzers() { TestMapper toMerge = fromMapping(mapping); e = expectThrows( IllegalArgumentException.class, - () -> original.merge(toMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> original.merge(toMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals( "Mapper for [field] conflicts with existing mapper:\n" + "\tCannot update parameter [analyzer] from [default] to [_standard]", diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index fa4c8bb089855..f178e66955fdc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -8,11 +8,6 @@ package org.elasticsearch.index.mapper.vectors; -import org.apache.lucene.queries.function.FunctionQuery; -import org.apache.lucene.queries.function.valuesource.ByteVectorSimilarityFunction; -import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.KnnByteVectorQuery; import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.Query; @@ -25,6 +20,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.DenseVectorFieldType; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.VectorSimilarity; +import org.elasticsearch.search.vectors.DenseVectorQuery; import org.elasticsearch.search.vectors.VectorData; import java.io.IOException; @@ -218,16 +214,7 @@ public void testExactKnnQuery() { queryVector[i] = randomFloat(); } Query query = field.createExactKnnQuery(VectorData.fromFloats(queryVector)); - assertTrue(query instanceof BooleanQuery); - BooleanQuery booleanQuery = (BooleanQuery) query; - boolean foundFunction = false; - for (BooleanClause clause : 
booleanQuery) { - if (clause.getQuery() instanceof FunctionQuery functionQuery) { - foundFunction = true; - assertTrue(functionQuery.getValueSource() instanceof FloatVectorSimilarityFunction); - } - } - assertTrue("Unable to find FloatVectorSimilarityFunction in created BooleanQuery", foundFunction); + assertTrue(query instanceof DenseVectorQuery.Floats); } { DenseVectorFieldType field = new DenseVectorFieldType( @@ -245,16 +232,7 @@ public void testExactKnnQuery() { queryVector[i] = randomByte(); } Query query = field.createExactKnnQuery(VectorData.fromBytes(queryVector)); - assertTrue(query instanceof BooleanQuery); - BooleanQuery booleanQuery = (BooleanQuery) query; - boolean foundFunction = false; - for (BooleanClause clause : booleanQuery) { - if (clause.getQuery() instanceof FunctionQuery functionQuery) { - foundFunction = true; - assertTrue(functionQuery.getValueSource() instanceof ByteVectorSimilarityFunction); - } - } - assertTrue("Unable to find FloatVectorSimilarityFunction in created BooleanQuery", foundFunction); + assertTrue(query instanceof DenseVectorQuery.Bytes); } } diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java index a7cc74582afdc..33a593f5aa125 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java @@ -227,13 +227,9 @@ public void testJarHellDuplicateCodebaseAcrossDeps() throws Exception { transitiveDeps.put("dep2", Collections.singleton(dupJar.toUri().toURL())); PluginDescriptor info1 = newTestDescriptor("myplugin", List.of("dep1", "dep2")); PluginBundle bundle = new PluginBundle(info1, pluginDir); - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> PluginsUtils.checkBundleJarHell(JarHell.parseModulesAndClassPath(), bundle, transitiveDeps) - ); - assertEquals("failed to load plugin myplugin due to jar hell", e.getMessage()); - assertThat(e.getCause().getMessage(), containsString("jar hell!")); - assertThat(e.getCause().getMessage(), containsString("duplicate codebases")); + PluginsUtils.checkBundleJarHell(JarHell.parseModulesAndClassPath(), bundle, transitiveDeps); + Set transitive = transitiveDeps.get("myplugin"); + assertThat(transitive, containsInAnyOrder(pluginJar.toUri().toURL(), dupJar.toUri().toURL())); } // Note: testing dup codebase with core is difficult because it requires a symlink, but we have mock filesystems and security manager diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 7be1dcdcf7b77..83cb189415f7e 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -192,7 +192,9 @@ public void testRegisterRejectsInvalidRepositoryNames() { public void testPutRepositoryVerificationFails() { var repoName = randomAlphaOfLengthBetween(10, 25); - var request = new PutRepositoryRequest().name(repoName).type(VerificationFailRepository.TYPE).verify(true); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName) + .type(VerificationFailRepository.TYPE) + .verify(true); var resultListener = new SubscribableListener(); repositoriesService.registerRepository(request, resultListener); var failure = safeAwaitFailure(resultListener); 
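Note the behavioral flip in the PluginsUtilsTests hunk above: a codebase duplicated across two transitive dependencies no longer trips the jar-hell IllegalStateException; checkBundleJarHell() is now expected to succeed and to record the union of the dependency URLs under the plugin's own key. Restated as a sketch, assuming transitiveDeps maps plugin names to Set<URL>:

    // After the change: no exception; both dep jars end up in the plugin's transitive set.
    PluginsUtils.checkBundleJarHell(JarHell.parseModulesAndClassPath(), bundle, transitiveDeps);
    Set<URL> transitive = transitiveDeps.get("myplugin");
    assertThat(transitive, containsInAnyOrder(pluginJar.toUri().toURL(), dupJar.toUri().toURL()));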
@@ -203,14 +205,18 @@ public void testPutRepositoryVerificationFails() { public void testPutRepositoryVerificationFailsOnExisting() { var repoName = randomAlphaOfLengthBetween(10, 25); - var request = new PutRepositoryRequest().name(repoName).type(TestRepository.TYPE).verify(true); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName) + .type(TestRepository.TYPE) + .verify(true); var resultListener = new SubscribableListener(); repositoriesService.registerRepository(request, resultListener); var ackResponse = safeAwait(resultListener); assertTrue(ackResponse.isAcknowledged()); // try to update existing repository with faulty repo and make sure it is not applied - request = new PutRepositoryRequest().name(repoName).type(VerificationFailRepository.TYPE).verify(true); + request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName) + .type(VerificationFailRepository.TYPE) + .verify(true); resultListener = new SubscribableListener<>(); repositoriesService.registerRepository(request, resultListener); var failure = safeAwaitFailure(resultListener); @@ -221,7 +227,9 @@ public void testPutRepositoryVerificationFailsOnExisting() { public void testPutRepositorySkipVerification() { var repoName = randomAlphaOfLengthBetween(10, 25); - var request = new PutRepositoryRequest().name(repoName).type(VerificationFailRepository.TYPE).verify(false); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName) + .type(VerificationFailRepository.TYPE) + .verify(false); var resultListener = new SubscribableListener(); repositoriesService.registerRepository(request, resultListener); var ackResponse = safeAwait(resultListener); @@ -280,7 +288,7 @@ public void testRemoveUnknownRepositoryTypeWhenApplyingClusterState() { public void testRegisterRepositoryFailsForUnknownType() { var repoName = randomAlphaOfLengthBetween(10, 25); - var request = new PutRepositoryRequest().name(repoName).type("unknown"); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName).type("unknown"); repositoriesService.registerRepository(request, new ActionListener<>() { @Override @@ -359,7 +367,7 @@ public void testRegisterRepositorySuccessAfterCreationFailed() { assertThat(repo, isA(InvalidRepository.class)); // 2. 
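The common thread in these RepositoriesServiceTests hunks: PutRepositoryRequest can no longer be built without timeouts; the no-argument and name-only constructors on the removed lines give way to explicit-timeout variants. A minimal sketch of the new construction, assuming the usual AcknowledgedRequest ordering (master-node timeout first, then ack timeout), with the test-framework constant TEST_REQUEST_TIMEOUT supplying both and an illustrative repository name:

    // Timeouts are now explicit; a third constructor argument can carry the repository name.
    PutRepositoryRequest request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
        .name("my-repo")
        .type("fs")
        .verify(true);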
repository creation succeeds when the current node becomes the master node and the repository is put again - var request = new PutRepositoryRequest().name(repoName).type(TestRepository.TYPE); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName).type(TestRepository.TYPE); var resultListener = new SubscribableListener(); repositoriesService.registerRepository(request, resultListener); @@ -385,7 +393,13 @@ private ClusterState emptyState() { } private void assertThrowsOnRegister(String repoName) { - expectThrows(RepositoryException.class, () -> repositoriesService.registerRepository(new PutRepositoryRequest(repoName), null)); + expectThrows( + RepositoryException.class, + () -> repositoriesService.registerRepository( + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName), + null + ) + ); } private static class TestRepository implements Repository { diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java index 0d94c027f8c46..1361385521378 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java @@ -172,28 +172,36 @@ public void testDeleteThrottling() { assertAcked( client().admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", repoPath)) ); - client().admin().cluster().prepareCreateSnapshot(TEST_REPO_NAME, "snapshot-1").setWaitForCompletion(true).get(); - client().admin().cluster().prepareCreateSnapshot(TEST_REPO_NAME, "snapshot-2").setWaitForCompletion(true).get(); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "snapshot-1") + .setWaitForCompletion(true) + .get(); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "snapshot-2") + .setWaitForCompletion(true) + .get(); - assertAcked(client().admin().cluster().prepareDeleteRepository(TEST_REPO_NAME)); // Now delete one of the snapshots using the test repo implementation which verifies the throttling behaviour assertAcked( client().admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(TEST_REPO_TYPE) .setSettings(Settings.builder().put("location", repoPath)) ); - assertAcked(client().admin().cluster().prepareDeleteSnapshot(TEST_REPO_NAME, "snapshot-1").get()); + assertAcked(client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "snapshot-1").get()); - assertAcked(client().admin().cluster().prepareDeleteRepository(TEST_REPO_NAME)); + assertAcked(client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME)); } } diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 486390f27391c..ac23f646e5c52 100644 ---
a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -58,7 +58,6 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -106,7 +105,7 @@ public void testRetrieveSnapshots() { logger.info("--> creating repository"); AcknowledgedResponse putRepositoryResponse = client.admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings(Settings.builder().put(node().settings()).put("location", location)) .get(); @@ -126,7 +125,7 @@ public void testRetrieveSnapshots() { logger.info("--> create first snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(TEST_REPO_NAME, "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "test-snap-1") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -135,7 +134,7 @@ public void testRetrieveSnapshots() { logger.info("--> create second snapshot"); createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(TEST_REPO_NAME, "test-snap-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "test-snap-2") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -256,7 +255,7 @@ public void testBadChunksize() { RepositoryException.class, () -> client.admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings( Settings.builder() @@ -286,10 +285,11 @@ public void testRepositoryDataDetails() throws Exception { ); final long beforeStartTime = getInstanceFromNode(ThreadPool.class).absoluteTimeInMillis(); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repositoryName, "test-snap-1") - .setWaitForCompletion(true) - .setPartial(true) - .get(); + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repositoryName, + "test-snap-1" + ).setWaitForCompletion(true).setPartial(true).get(); final long afterEndTime = System.currentTimeMillis(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); @@ -347,7 +347,7 @@ private BlobStoreRepository setupRepo() { } AcknowledgedResponse putRepositoryResponse = client.admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings(repoSettings) .setVerify(false) // prevent eager reading of repo data @@ -364,7 +364,10 @@ private BlobStoreRepository setupRepo() { @After public void removeRepo() { try { - client().admin().cluster().prepareDeleteRepository(TEST_REPO_NAME).get(TimeValue.timeValueSeconds(10)); + client().admin() + .cluster() + .prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) + .get(TimeValue.timeValueSeconds(10)); } catch (RepositoryMissingException e) { // ok, not all tests create the test repo } @@ -512,39 +515,50 @@ private Environment createEnvironment() { public void testShardBlobsToDelete() { final 
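The same timeout-threading runs through the client side of BlobStoreRepositoryTests: repository-level helpers (preparePutRepository, prepareDeleteRepository) now take both timeouts before the name, while snapshot-level helpers (prepareCreateSnapshot, prepareDeleteSnapshot) take only the master-node timeout before the repository and snapshot names. A condensed sketch of the two shapes, with illustrative names and repoPath as in the tests:

    // Repository CRUD: (masterNodeTimeout, ackTimeout, repoName).
    client().admin().cluster()
        .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")
        .setType("fs")
        .setSettings(Settings.builder().put("location", repoPath));

    // Snapshot ops: (masterNodeTimeout, repoName, snapshotName).
    client().admin().cluster()
        .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "snap-1")
        .setWaitForCompletion(true)
        .get();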
var repo = setupRepo(); - final var shardBlobsToDelete = repo.new ShardBlobsToDelete(); - final var expectedShardGenerations = ShardGenerations.builder(); - final var expectedBlobsToDelete = new HashSet(); - - final var countDownLatch = new CountDownLatch(1); - try (var refs = new RefCountingRunnable(countDownLatch::countDown)) { - for (int index = between(0, 10); index > 0; index--) { - final var indexId = new IndexId(randomIdentifier(), randomUUID()); - for (int shard = between(1, 3); shard > 0; shard--) { - final var shardId = shard; - final var shardGeneration = new ShardGeneration(randomUUID()); - expectedShardGenerations.put(indexId, shard, shardGeneration); - final var blobsToDelete = randomList(10, ESTestCase::randomIdentifier); - final var indexPath = repo.basePath().add("indices").add(indexId.getId()).add(Integer.toString(shard)).buildAsString(); - for (final var blobToDelete : blobsToDelete) { - expectedBlobsToDelete.add(indexPath + blobToDelete); - } - - repo.threadPool() - .generic() - .execute( - ActionRunnable.run( - refs.acquireListener(), - () -> shardBlobsToDelete.addShardDeleteResult(indexId, shardId, shardGeneration, blobsToDelete) - ) + try (var shardBlobsToDelete = repo.new ShardBlobsToDelete()) { + final var expectedShardGenerations = ShardGenerations.builder(); + final var expectedBlobsToDelete = new HashSet(); + + final var countDownLatch = new CountDownLatch(1); + int blobCount = 0; + try (var refs = new RefCountingRunnable(countDownLatch::countDown)) { + for (int index = between(0, 1000); index > 0; index--) { + final var indexId = new IndexId(randomIdentifier(), randomUUID()); + for (int shard = between(1, 30); shard > 0; shard--) { + final var shardId = shard; + final var shardGeneration = new ShardGeneration(randomUUID()); + expectedShardGenerations.put(indexId, shard, shardGeneration); + final var blobsToDelete = randomList( + 100, + () -> randomFrom("meta-", "index-", "snap-") + randomUUID() + randomFrom("", ".dat") ); + blobCount += blobsToDelete.size(); + final var indexPath = repo.basePath() + .add("indices") + .add(indexId.getId()) + .add(Integer.toString(shard)) + .buildAsString(); + for (final var blobToDelete : blobsToDelete) { + expectedBlobsToDelete.add(indexPath + blobToDelete); + } + + repo.threadPool() + .generic() + .execute( + ActionRunnable.run( + refs.acquireListener(), + () -> shardBlobsToDelete.addShardDeleteResult(indexId, shardId, shardGeneration, blobsToDelete) + ) + ); + } } } + safeAwait(countDownLatch); + assertEquals(expectedShardGenerations.build(), shardBlobsToDelete.getUpdatedShardGenerations()); + shardBlobsToDelete.getBlobPaths().forEachRemaining(s -> assertTrue(expectedBlobsToDelete.remove(s))); + assertThat(expectedBlobsToDelete, empty()); + assertThat(shardBlobsToDelete.sizeInBytes(), lessThanOrEqualTo(Math.max(ByteSizeUnit.KB.toIntBytes(1), 20 * blobCount))); } - safeAwait(countDownLatch); - assertEquals(expectedShardGenerations.build(), shardBlobsToDelete.getUpdatedShardGenerations()); - shardBlobsToDelete.getBlobPaths().forEachRemaining(s -> assertTrue(expectedBlobsToDelete.remove(s))); - assertThat(expectedBlobsToDelete, empty()); } public void testUuidCreationLogging() { @@ -555,7 +569,10 @@ public void testUuidCreationLogging() { MockLog.assertThatLogger( () -> safeGet( - client().execute(TransportCreateSnapshotAction.TYPE, new CreateSnapshotRequest(repoName, snapshot).waitForCompletion(true)) + client().execute( + TransportCreateSnapshotAction.TYPE, + new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, 
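Two things change in testShardBlobsToDelete above. First, ShardBlobsToDelete is now a closeable resource, so the test wraps it in try-with-resources. Second, the workload is scaled up (up to 1000 indices, 30 shards each, up to 100 blobs per shard) so the new memory-footprint assertion is meaningful: sizeInBytes() must stay within max(1 KiB, 20 bytes per recorded blob), which only holds if the delete set is kept in some compact encoded form rather than as one object per blob name. A reduced sketch of the usage pattern, names borrowed from the test:

    try (var shardBlobsToDelete = repo.new ShardBlobsToDelete()) {
        // possibly from many generic-pool threads, as in the test:
        shardBlobsToDelete.addShardDeleteResult(indexId, shardId, shardGeneration, blobsToDelete);
        // ...once all results are in:
        assertEquals(expectedShardGenerations.build(), shardBlobsToDelete.getUpdatedShardGenerations());
        assertThat(shardBlobsToDelete.sizeInBytes(), lessThanOrEqualTo(Math.max(ByteSizeUnit.KB.toIntBytes(1), 20 * blobCount)));
    } // close() releases whatever buffer backs the encoded blob paths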
snapshot).waitForCompletion(true) + ) ), BlobStoreRepository.class, new MockLog.SeenEventExpectation( @@ -569,14 +586,21 @@ public void testUuidCreationLogging() { MockLog.assertThatLogger( // no more "Generated" messages ... () -> { - safeGet(client().execute(TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(repoName))); + safeGet( + client().execute( + TransportDeleteRepositoryAction.TYPE, + new DeleteRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) + ) + ); // we get a "Registering" message when re-registering the repository with ?verify=true (the default) MockLog.assertThatLogger( () -> safeGet( client().execute( TransportPutRepositoryAction.TYPE, - new PutRepositoryRequest(repoName).type("fs").verify(true).settings(repoMetadata.settings()) + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).type("fs") + .verify(true) + .settings(repoMetadata.settings()) ) ), RepositoriesService.class, @@ -591,23 +615,31 @@ public void testUuidCreationLogging() { safeGet( client().execute( TransportCreateSnapshotAction.TYPE, - new CreateSnapshotRequest(repoName, randomIdentifier()).waitForCompletion(true) + new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()).waitForCompletion(true) ) ); assertTrue( - safeGet(client().execute(TransportGetSnapshotsAction.TYPE, new GetSnapshotsRequest(repoName))).getSnapshots() + safeGet(client().execute(TransportGetSnapshotsAction.TYPE, new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, repoName))) + .getSnapshots() .stream() .anyMatch(snapshotInfo -> snapshotInfo.snapshotId().getName().equals(snapshot)) ); - safeGet(client().execute(TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(repoName))); + safeGet( + client().execute( + TransportDeleteRepositoryAction.TYPE, + new DeleteRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) + ) + ); // No "Registering" message with ?verify=false because we don't read the repo data yet MockLog.assertThatLogger( () -> safeGet( client().execute( TransportPutRepositoryAction.TYPE, - new PutRepositoryRequest(repoName).type("fs").verify(false).settings(repoMetadata.settings()) + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).type("fs") + .verify(false) + .settings(repoMetadata.settings()) ) ), RepositoriesService.class, @@ -624,7 +656,7 @@ public void testUuidCreationLogging() { () -> safeGet( client().execute( TransportCreateSnapshotAction.TYPE, - new CreateSnapshotRequest(repoName, randomIdentifier()).waitForCompletion(true) + new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()).waitForCompletion(true) ) ), RepositoriesService.class, diff --git a/server/src/test/java/org/elasticsearch/search/vectors/AbstractDenseVectorQueryTestCase.java b/server/src/test/java/org/elasticsearch/search/vectors/AbstractDenseVectorQueryTestCase.java new file mode 100644 index 0000000000000..6d2d600a18a81 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/AbstractDenseVectorQueryTestCase.java @@ -0,0 +1,307 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.vectors; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.store.BaseDirectoryWrapper; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.apache.lucene.index.VectorSimilarityFunction.COSINE; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +abstract class AbstractDenseVectorQueryTestCase extends ESTestCase { + + abstract DenseVectorQuery getDenseVectorQuery(String field, float[] query); + + abstract float[] randomVector(int dim); + + abstract Field getKnnVectorField(String name, float[] vector, VectorSimilarityFunction similarityFunction); + + public void testEquals() { + DenseVectorQuery q1 = getDenseVectorQuery("f1", new float[] { 0, 1 }); + DenseVectorQuery q2 = getDenseVectorQuery("f1", new float[] { 0, 1 }); + + assertEquals(q2, q1); + + assertNotEquals(null, q1); + assertNotEquals(q1, new TermQuery(new Term("f1", "x"))); + + assertNotEquals(q1, getDenseVectorQuery("f2", new float[] { 0, 1 })); + assertNotEquals(q1, getDenseVectorQuery("f1", new float[] { 1, 1 })); + } + + public void testEmptyIndex() throws IOException { + try (Directory indexStore = getIndexStore("field"); IndexReader reader = DirectoryReader.open(indexStore)) { + IndexSearcher searcher = newSearcher(reader); + DenseVectorQuery kvq = getDenseVectorQuery("field", new float[] { 1, 2 }); + assertMatches(searcher, kvq, 0); + } + } + + /** testDimensionMismatch */ + public void testDimensionMismatch() throws IOException { + try ( + Directory indexStore = getIndexStore("field", new float[] { 0, 1 }, new float[] { 1, 2 }, new float[] { 0, 0 }); + IndexReader reader = DirectoryReader.open(indexStore) + ) { + IndexSearcher searcher = newSearcher(reader); + DenseVectorQuery kvq = getDenseVectorQuery("field", new float[] { 0 }); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> searcher.search(kvq, 10)); + assertEquals("vector query dimension: 1 differs from field dimension: 2", e.getMessage()); + } + } + + /** testNonVectorField */ + public void testNonVectorField() throws IOException { + try ( + Directory indexStore = getIndexStore("field", new float[] { 0, 1 }, new float[] { 1, 2 }, new float[] { 0, 0 }); + IndexReader reader = DirectoryReader.open(indexStore) + ) { + IndexSearcher searcher = newSearcher(reader); + assertMatches(searcher, getDenseVectorQuery("xyzzy", new float[] { 0 }), 0); + assertMatches(searcher, getDenseVectorQuery("id", new float[] { 0 }), 0); + } + } + + public void 
testScoreEuclidean() throws IOException { + float[][] vectors = new float[5][]; + for (int j = 0; j < 5; j++) { + vectors[j] = new float[] { j, j }; + } + try ( + Directory d = getStableIndexStore("field", VectorSimilarityFunction.EUCLIDEAN, vectors); + IndexReader reader = DirectoryReader.open(d) + ) { + IndexSearcher searcher = new IndexSearcher(reader); + float[] queryVector = new float[] { 2, 3 }; + DenseVectorQuery query = getDenseVectorQuery("field", queryVector); + Query rewritten = query.rewrite(searcher); + Weight weight = searcher.createWeight(rewritten, ScoreMode.COMPLETE, 1); + Scorer scorer = weight.scorer(reader.leaves().get(0)); + + // prior to advancing, score is 0 + assertEquals(-1, scorer.docID()); + + DocIdSetIterator it = scorer.iterator(); + assertEquals(5, it.cost()); + it.nextDoc(); + int curDoc = 0; + // iterate the docs and assert the scores are what we expect + while (it.docID() != NO_MORE_DOCS) { + assertEquals(VectorSimilarityFunction.EUCLIDEAN.compare(vectors[curDoc], queryVector), scorer.score(), 0.0001); + curDoc++; + it.nextDoc(); + } + } + } + + public void testScoreCosine() throws IOException { + float[][] vectors = new float[5][]; + for (int j = 1; j <= 5; j++) { + vectors[j - 1] = new float[] { j, j * j }; + } + try (Directory d = getStableIndexStore("field", COSINE, vectors)) { + try (IndexReader reader = DirectoryReader.open(d)) { + assertEquals(1, reader.leaves().size()); + IndexSearcher searcher = new IndexSearcher(reader); + float[] queryVector = new float[] { 2, 3 }; + DenseVectorQuery query = getDenseVectorQuery("field", queryVector); + Query rewritten = query.rewrite(searcher); + Weight weight = searcher.createWeight(rewritten, ScoreMode.COMPLETE, 1); + Scorer scorer = weight.scorer(reader.leaves().get(0)); + + // prior to advancing, score is undefined + assertEquals(-1, scorer.docID()); + DocIdSetIterator it = scorer.iterator(); + assertEquals(5, it.cost()); + it.nextDoc(); + int curDoc = 0; + // iterate the docs and assert the scores are what we expect + while (it.docID() != NO_MORE_DOCS) { + assertEquals(COSINE.compare(vectors[curDoc], queryVector), scorer.score(), 0.0001); + curDoc++; + it.nextDoc(); + } + } + } + } + + public void testScoreMIP() throws IOException { + try ( + Directory indexStore = getIndexStore( + "field", + VectorSimilarityFunction.MAXIMUM_INNER_PRODUCT, + new float[] { 0, 1 }, + new float[] { 1, 2 }, + new float[] { 0, 0 } + ); + IndexReader reader = DirectoryReader.open(indexStore) + ) { + IndexSearcher searcher = newSearcher(reader); + DenseVectorQuery kvq = getDenseVectorQuery("field", new float[] { 0, -1 }); + assertMatches(searcher, kvq, 3); + ScoreDoc[] scoreDocs = searcher.search(kvq, 3).scoreDocs; + assertIdMatches(reader, "id2", scoreDocs[0]); + assertIdMatches(reader, "id0", scoreDocs[1]); + assertIdMatches(reader, "id1", scoreDocs[2]); + + assertEquals(1.0, scoreDocs[0].score, 1e-7); + assertEquals(1 / 2f, scoreDocs[1].score, 1e-7); + assertEquals(1 / 3f, scoreDocs[2].score, 1e-7); + } + } + + public void testExplain() throws IOException { + float[][] vectors = new float[5][]; + for (int j = 0; j < 5; j++) { + vectors[j] = new float[] { j, j }; + } + try (Directory d = getStableIndexStore("field", VectorSimilarityFunction.EUCLIDEAN, vectors)) { + try (IndexReader reader = DirectoryReader.open(d)) { + IndexSearcher searcher = new IndexSearcher(reader); + DenseVectorQuery query = getDenseVectorQuery("field", new float[] { 2, 3 }); + Explanation matched = searcher.explain(query, 2); + 
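A note on where the asserted numbers in these score tests come from. The new AbstractDenseVectorQueryTestCase exercises one brute-force scoring path through three similarity functions, and the subclasses only decide the element type (float vs. byte). For testScoreMIP, Lucene scales a raw maximum-inner-product value d into a positive score as score = 1 / (1 - d) when d < 0 and score = d + 1 otherwise; with query vector (0, -1), the indexed vectors (0, 0), (0, 1) and (1, 2) have dot products 0, -1 and -2, giving exactly the asserted scores 1, 1/2 and 1/3. The Euclidean and cosine loops sidestep hand-computed constants entirely by asserting scorer.score() against VectorSimilarityFunction.compare(...) directly.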
assertTrue(matched.isMatch()); + assertEquals(1 / 2f, matched.getValue()); + assertEquals(0, matched.getDetails().length); + + Explanation nomatch = searcher.explain(query, 6); + assertFalse(nomatch.isMatch()); + + nomatch = searcher.explain(getDenseVectorQuery("someMissingField", new float[] { 2, 3 }), 6); + assertFalse(nomatch.isMatch()); + } + } + } + + public void testRandom() throws IOException { + int numDocs = atLeast(100); + int dimension = atLeast(5); + int numIters = atLeast(10); + boolean everyDocHasAVector = random().nextBoolean(); + try (Directory d = newDirectoryForTest()) { + RandomIndexWriter w = new RandomIndexWriter(random(), d); + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + if (everyDocHasAVector || random().nextInt(10) != 2) { + doc.add(getKnnVectorField("field", randomVector(dimension), VectorSimilarityFunction.EUCLIDEAN)); + } + w.addDocument(doc); + } + w.close(); + try (IndexReader reader = DirectoryReader.open(d)) { + IndexSearcher searcher = newSearcher(reader); + for (int i = 0; i < numIters; i++) { + DenseVectorQuery query = getDenseVectorQuery("field", randomVector(dimension)); + int n = random().nextInt(100) + 1; + TopDocs results = searcher.search(query, n); + assert reader.hasDeletions() == false; + assertTrue(results.totalHits.value >= results.scoreDocs.length); + // verify the results are in descending score order + float last = Float.MAX_VALUE; + for (ScoreDoc scoreDoc : results.scoreDocs) { + assertTrue(scoreDoc.score <= last); + last = scoreDoc.score; + } + } + } + } + } + + void assertIdMatches(IndexReader reader, String expectedId, ScoreDoc scoreDoc) throws IOException { + String actualId = reader.storedFields().document(scoreDoc.doc).get("id"); + assertEquals(expectedId, actualId); + } + + private void assertMatches(IndexSearcher searcher, Query q, int expectedMatches) throws IOException { + ScoreDoc[] result = searcher.search(q, 1000).scoreDocs; + assertEquals(expectedMatches, result.length); + } + + Directory getIndexStore(String field, float[]... contents) throws IOException { + return getIndexStore(field, VectorSimilarityFunction.EUCLIDEAN, contents); + } + + private Directory getStableIndexStore(String field, VectorSimilarityFunction vectorSimilarityFunction, float[]... contents) + throws IOException { + Directory indexStore = newDirectoryForTest(); + try (IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig())) { + for (int i = 0; i < contents.length; ++i) { + Document doc = new Document(); + doc.add(getKnnVectorField(field, contents[i], vectorSimilarityFunction)); + doc.add(new StringField("id", "id" + i, Field.Store.YES)); + writer.addDocument(doc); + } + // Add some documents without a vector + for (int i = 0; i < 5; i++) { + Document doc = new Document(); + doc.add(new StringField("other", "value", Field.Store.NO)); + writer.addDocument(doc); + } + } + return indexStore; + } + + Directory getIndexStore(String field, VectorSimilarityFunction vectorSimilarityFunction, float[]... 
contents) throws IOException { + Directory indexStore = newDirectoryForTest(); + RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore); + for (int i = 0; i < contents.length; ++i) { + Document doc = new Document(); + doc.add(getKnnVectorField(field, contents[i], vectorSimilarityFunction)); + doc.add(new StringField("id", "id" + i, Field.Store.YES)); + writer.addDocument(doc); + if (randomBoolean()) { + // Add some documents without a vector + for (int j = 0; j < randomIntBetween(1, 5); j++) { + doc = new Document(); + doc.add(new StringField("other", "value", Field.Store.NO)); + // Add fields that will be matched by our test filters but won't have vectors + doc.add(new StringField("id", "id" + j, Field.Store.YES)); + writer.addDocument(doc); + } + } + } + // Add some documents without a vector + for (int i = 0; i < 5; i++) { + Document doc = new Document(); + doc.add(new StringField("other", "value", Field.Store.NO)); + writer.addDocument(doc); + } + writer.close(); + return indexStore; + } + + protected BaseDirectoryWrapper newDirectoryForTest() { + return LuceneTestCase.newDirectory(random()); + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/vectors/DenseVectorQueryBytesTests.java b/server/src/test/java/org/elasticsearch/search/vectors/DenseVectorQueryBytesTests.java new file mode 100644 index 0000000000000..8007f5048adca --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/DenseVectorQueryBytesTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.vectors; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KnnByteVectorField; +import org.apache.lucene.index.VectorSimilarityFunction; + +public class DenseVectorQueryBytesTests extends AbstractDenseVectorQueryTestCase { + @Override + DenseVectorQuery getDenseVectorQuery(String field, float[] query) { + byte[] bytes = new byte[query.length]; + for (int i = 0; i < query.length; i++) { + bytes[i] = (byte) query[i]; + } + return new DenseVectorQuery.Bytes(bytes, field); + } + + @Override + float[] randomVector(int dim) { + byte[] bytes = new byte[dim]; + random().nextBytes(bytes); + float[] floats = new float[dim]; + for (int i = 0; i < dim; i++) { + floats[i] = bytes[i]; + } + return floats; + } + + @Override + Field getKnnVectorField(String name, float[] vector, VectorSimilarityFunction similarityFunction) { + byte[] bytes = new byte[vector.length]; + for (int i = 0; i < vector.length; i++) { + bytes[i] = (byte) vector[i]; + } + return new KnnByteVectorField(name, bytes, similarityFunction); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/vectors/DenseVectorQueryFloatsTests.java b/server/src/test/java/org/elasticsearch/search/vectors/DenseVectorQueryFloatsTests.java new file mode 100644 index 0000000000000..04355ee53d3c9 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/DenseVectorQueryFloatsTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.vectors; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KnnFloatVectorField; +import org.apache.lucene.index.VectorSimilarityFunction; + +public class DenseVectorQueryFloatsTests extends AbstractDenseVectorQueryTestCase { + @Override + DenseVectorQuery getDenseVectorQuery(String field, float[] query) { + return new DenseVectorQuery.Floats(query, field); + } + + @Override + float[] randomVector(int dim) { + float[] vector = new float[dim]; + for (int i = 0; i < vector.length; i++) { + vector[i] = randomFloat(); + } + return vector; + } + + @Override + Field getKnnVectorField(String name, float[] vector, VectorSimilarityFunction similarityFunction) { + return new KnnFloatVectorField(name, vector, similarityFunction); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java index 02093d9fa0e44..1e77e35b60a4c 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java @@ -8,11 +8,8 @@ package org.elasticsearch.search.vectors; -import org.apache.lucene.queries.function.FunctionQuery; -import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.util.VectorUtil; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.IndexVersions; @@ -25,9 +22,9 @@ import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; +import java.util.Arrays; import java.util.Collection; import java.util.List; -import java.util.Locale; public class ExactKnnQueryBuilderTests extends AbstractQueryTestCase { @@ -86,22 +83,16 @@ public void testValidOutput() { @Override protected void doAssertLuceneQuery(ExactKnnQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException { - assertTrue(query instanceof BooleanQuery); - BooleanQuery booleanQuery = (BooleanQuery) query; - boolean foundFunction = false; - for (BooleanClause clause : booleanQuery) { - if (clause.getQuery() instanceof FunctionQuery functionQuery) { - foundFunction = true; - assertTrue(functionQuery.getValueSource() instanceof FloatVectorSimilarityFunction); - String description = functionQuery.getValueSource().description().toLowerCase(Locale.ROOT); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE)) { - assertTrue(description, description.contains("dot_product")); - } else { - assertTrue(description, description.contains("cosine")); - } - } + assertTrue(query instanceof DenseVectorQuery.Floats); + DenseVectorQuery.Floats denseVectorQuery = (DenseVectorQuery.Floats) query; + assertEquals(VECTOR_FIELD, denseVectorQuery.field); + float[] expected = Arrays.copyOf(queryBuilder.getQuery().asFloatVector(), queryBuilder.getQuery().asFloatVector().length); + if 
(context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE)) { + VectorUtil.l2normalize(expected); + assertArrayEquals(expected, denseVectorQuery.getQuery(), 0.0f); + } else { + assertArrayEquals(expected, denseVectorQuery.getQuery(), 0.0f); } - assertTrue("Unable to find FloatVectorSimilarityFunction in created BooleanQuery", foundFunction); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java index 2d84dfd0cc907..0d0293b962609 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java @@ -68,7 +68,7 @@ public void testUpdateDataStream() { Index updatedFailureIndex = new Index(failureIndexName, randomUUID()); when(failureIndexMetadata.getIndex()).thenReturn(updatedFailureIndex); - RestoreSnapshotRequest request = new RestoreSnapshotRequest(); + RestoreSnapshotRequest request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT); DataStream updateDataStream = RestoreService.updateDataStream(dataStream, metadata, request); @@ -103,7 +103,8 @@ public void testUpdateDataStreamRename() { Index renamedFailureIndex = new Index(renamedFailureIndexName, randomUUID()); when(failureIndexMetadata.getIndex()).thenReturn(renamedFailureIndex); - RestoreSnapshotRequest request = new RestoreSnapshotRequest().renamePattern("data-stream-1").renameReplacement("data-stream-2"); + RestoreSnapshotRequest request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).renamePattern("data-stream-1") + .renameReplacement("data-stream-2"); DataStream renamedDataStream = RestoreService.updateDataStream(dataStream, metadata, request); @@ -138,7 +139,7 @@ public void testPrefixNotChanged() { Index renamedFailureIndex = new Index(renamedFailureIndexName, randomUUID()); when(failureIndexMetadata.getIndex()).thenReturn(renamedFailureIndex); - RestoreSnapshotRequest request = new RestoreSnapshotRequest().renamePattern("ds-").renameReplacement("ds2-"); + RestoreSnapshotRequest request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).renamePattern("ds-").renameReplacement("ds2-"); DataStream renamedDataStream = RestoreService.updateDataStream(dataStream, metadata, request); @@ -146,7 +147,7 @@ public void testPrefixNotChanged() { assertEquals(List.of(renamedIndex), renamedDataStream.getIndices()); assertEquals(List.of(renamedFailureIndex), renamedDataStream.getFailureIndices().getIndices()); - request = new RestoreSnapshotRequest().renamePattern("ds-000001").renameReplacement("ds2-000001"); + request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).renamePattern("ds-000001").renameReplacement("ds2-000001"); renamedDataStream = RestoreService.updateDataStream(dataStream, metadata, request); @@ -216,7 +217,7 @@ public void testRefreshRepositoryUuidsRefreshesAsNeeded() { public void testNotAllowToRestoreGlobalStateFromSnapshotWithoutOne() { - var request = new RestoreSnapshotRequest().includeGlobalState(true); + var request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).includeGlobalState(true); var repository = new RepositoryMetadata("name", "type", Settings.EMPTY); var snapshot = new Snapshot("repository", new SnapshotId("name", "uuid")); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotRequestsTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotRequestsTests.java index 11b6c98d0f40c..23b1cb63b289b 100644 --- 
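The rewritten doAssertLuceneQuery above encodes the cosine change behind IndexVersions.NORMALIZED_VECTOR_COSINE: on indices created at or after that version, cosine similarity is served by l2-normalization plus dot product, so the expected query vector must be normalized before comparison, while older indices keep the raw vector. A hedged condensation of the same check (the original spells out both branches explicitly):

    // Expected vector: unit-normalized on new indices, verbatim on old ones.
    float[] expected = queryBuilder.getQuery().asFloatVector().clone();
    if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE)) {
        VectorUtil.l2normalize(expected); // in-place; cosine becomes dot_product over unit vectors
    }
    assertArrayEquals(expected, denseVectorQuery.getQuery(), 0.0f);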
a/server/src/test/java/org/elasticsearch/snapshots/SnapshotRequestsTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotRequestsTests.java @@ -22,7 +22,7 @@ public class SnapshotRequestsTests extends ESTestCase { public void testRestoreSnapshotRequestParsing() throws IOException { - RestoreSnapshotRequest request = new RestoreSnapshotRequest("test-repo", "test-snap"); + RestoreSnapshotRequest request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap"); XContentBuilder builder = jsonBuilder().startObject(); @@ -87,7 +87,7 @@ public void testRestoreSnapshotRequestParsing() throws IOException { } public void testCreateSnapshotRequestParsing() throws IOException { - CreateSnapshotRequest request = new CreateSnapshotRequest("test-repo", "test-snap"); + CreateSnapshotRequest request = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap"); XContentBuilder builder = jsonBuilder().startObject(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index f4aa44f143c40..b40e33c4baba8 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -268,7 +268,7 @@ public void verifyReposThenStopServices() { // failures seen during the previous test. client().admin() .cluster() - .prepareCreateSnapshot("repo", "last-snapshot") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "last-snapshot") .setWaitForCompletion(true) .setPartial(true) .execute(createSnapshotResponse); @@ -278,7 +278,9 @@ public void verifyReposThenStopServices() { assertThat(snapshotInfo.state(), either(is(SnapshotState.SUCCESS)).or(is(SnapshotState.PARTIAL))); assertThat(snapshotInfo.shardFailures(), iterableWithSize(snapshotInfo.failedShards())); assertThat(snapshotInfo.successfulShards(), is(snapshotInfo.totalShards() - snapshotInfo.failedShards())); - client().admin().cluster().cleanupRepository(new CleanupRepositoryRequest("repo"), cleanupResponse); + client().admin() + .cluster() + .cleanupRepository(new CleanupRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo"), cleanupResponse); }); final AtomicBoolean cleanedUp = new AtomicBoolean(false); continueOrDie(cleanupResponse, r -> cleanedUp.set(true)); @@ -317,7 +319,7 @@ public void testSuccessfulSnapshotAndRestore() { continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> { final Runnable afterIndexing = () -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseListener); if (documents == 0) { @@ -350,7 +352,7 @@ public void testSuccessfulSnapshotAndRestore() { ignored -> client().admin() .cluster() .restoreSnapshot( - new RestoreSnapshotRequest(repoName, snapshotName).waitForCompletion(true), + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).waitForCompletion(true), restoreSnapshotResponseListener ) ); @@ -415,7 +417,7 @@ public void testSnapshotWithNodeDisconnects() { } testClusterNodes.randomMasterNodeSafe().client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setPartial(partial) .execute(createSnapshotResponseStepListener); }); @@ -492,7 +494,7 @@ public void 
testSnapshotDeleteWithMasterFailover() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> testClusterNodes.randomMasterNodeSafe().client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(waitForSnapshot) .execute(createSnapshotResponseStepListener) ); @@ -502,7 +504,7 @@ public void testSnapshotDeleteWithMasterFailover() { scheduleNow(this::disconnectOrRestartMasterNode); testClusterNodes.randomDataNodeSafe().client.admin() .cluster() - .prepareDeleteSnapshot(repoName, snapshotName) + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(ActionListener.running(() -> snapshotDeleteResponded.set(true))); }); @@ -544,7 +546,7 @@ public void testConcurrentSnapshotCreateAndDelete() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(createSnapshotResponseStepListener) ); @@ -554,7 +556,10 @@ public void testConcurrentSnapshotCreateAndDelete() { @Override public void clusterChanged(ClusterChangedEvent event) { if (SnapshotsInProgress.get(event.state()).isEmpty() == false) { - client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(deleteSnapshotStepListener); + client().admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .execute(deleteSnapshotStepListener); masterNode.clusterService.removeListener(this); } } @@ -566,7 +571,7 @@ public void clusterChanged(ClusterChangedEvent event) { deleteSnapshotStepListener, acknowledgedResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createAnotherSnapshotResponseStepListener) ); @@ -609,7 +614,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener) ); @@ -620,7 +625,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { createSnapshotResponseStepListener, createSnapshotResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2") .execute(createOtherSnapshotResponseStepListener) ); @@ -630,7 +635,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { createOtherSnapshotResponseStepListener, createSnapshotResponse -> client().admin() .cluster() - .prepareDeleteSnapshot(repoName, snapshotName) + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(deleteSnapshotStepListener) ); @@ -639,7 +644,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { continueOrDie(deleteSnapshotStepListener, deleted -> { client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createAnotherSnapshotResponseStepListener); continueOrDie( @@ -683,7 +688,7 @@ public void testBulkSnapshotDeleteWithAbort() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> 
client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener) ); @@ -697,7 +702,10 @@ public void testBulkSnapshotDeleteWithAbort() { continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> { for (int i = 0; i < inProgressSnapshots; i++) { - client().admin().cluster().prepareCreateSnapshot(repoName, "other-" + i).execute(createSnapshotListener); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "other-" + i) + .execute(createSnapshotListener); } }); @@ -707,7 +715,7 @@ public void testBulkSnapshotDeleteWithAbort() { createOtherSnapshotResponseStepListener, createSnapshotResponse -> client().admin() .cluster() - .deleteSnapshot(new DeleteSnapshotRequest(repoName, "*"), deleteSnapshotStepListener) + .deleteSnapshot(new DeleteSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, "*"), deleteSnapshotStepListener) ); deterministicTaskQueue.runAllRunnableTasks(); @@ -742,7 +750,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { index, () -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener) ) @@ -760,7 +768,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { index, () -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, secondSnapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, secondSnapshotName) .setWaitForCompletion(true) .execute(createOtherSnapshotResponseStepListener) ) @@ -770,12 +778,17 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { final SubscribableListener restoreSnapshotResponseListener = new SubscribableListener<>(); continueOrDie(createOtherSnapshotResponseStepListener, createSnapshotResponse -> { - scheduleNow(() -> client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(deleteSnapshotStepListener)); + scheduleNow( + () -> client().admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .execute(deleteSnapshotStepListener) + ); scheduleNow( () -> client().admin() .cluster() .restoreSnapshot( - new RestoreSnapshotRequest(repoName, secondSnapshotName).waitForCompletion(true) + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, secondSnapshotName).waitForCompletion(true) .renamePattern("(.+)") .renameReplacement("restored_$1"), restoreSnapshotResponseListener @@ -871,7 +884,7 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException { createIndicesListener, createIndexResponses -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(false) .setPartial(partialSnapshot) .setIncludeGlobalState(randomBoolean()) @@ -946,7 +959,7 @@ public void testConcurrentDeletes() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener) ); @@ -962,7 +975,7 @@ public void testConcurrentDeletes() { for (SubscribableListener deleteListener : deleteSnapshotStepListeners) { client().admin() .cluster() - 
.prepareDeleteSnapshot(repoName, snapshotName) + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(ActionListener.wrap(resp -> deleteListener.onResponse(true), e -> { final Throwable unwrapped = ExceptionsHelper.unwrap( e, @@ -1044,12 +1057,15 @@ public void run() { } testClusterNodes.randomDataNodeSafe().client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(ActionListener.running(() -> { createdSnapshot.set(true); testClusterNodes.randomDataNodeSafe().client.admin() .cluster() - .deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName), ActionListener.noop()); + .deleteSnapshot( + new DeleteSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName), + ActionListener.noop() + ); })); scheduleNow( () -> testClusterNodes.randomMasterNodeSafe().client.execute( @@ -1116,7 +1132,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { if (initiatedSnapshot.compareAndSet(false, true)) { client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener); } @@ -1134,7 +1150,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { createSnapshotResponse -> client().admin() .cluster() .restoreSnapshot( - new RestoreSnapshotRequest(repoName, snapshotName).renamePattern(index) + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).renamePattern(index) .renameReplacement(restoredIndex) .waitForCompletion(true), restoreSnapshotResponseStepListener @@ -1210,7 +1226,7 @@ public void testRunConcurrentSnapshots() { scheduleNow( () -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(snapshotListener) ); @@ -1305,7 +1321,7 @@ public TransportRequestHandler interceptHandler( try (var listeners = new RefCountingListener(stepListener)) { client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(listeners.acquire(createRepoResponse -> {})); @@ -1325,7 +1341,7 @@ public TransportRequestHandler interceptHandler( .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, originalSnapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, originalSnapshotName) .setWaitForCompletion(true) .execute(l.map(v -> null)) ); @@ -1341,7 +1357,7 @@ public TransportRequestHandler interceptHandler( stepListener, l -> client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIndices(randomNonEmptySubsetOf(indices).toArray(String[]::new)) .setPartial(true) .execute(l.map(v1 -> null)) @@ -1366,7 +1382,7 @@ public TransportRequestHandler interceptHandler( ).addListener(l); client.admin() .cluster() - .prepareCloneSnapshot(repoName, originalSnapshotName, cloneName) + .prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, originalSnapshotName, cloneName) .setIndices(randomNonEmptySubsetOf(indices).toArray(String[]::new)) .execute(ActionTestUtils.assertNoFailureListener(r -> {})); }))); @@ -1402,7 +1418,7 @@ public void 
testFullSnapshotUnassignedShards() { try (var listeners = new RefCountingListener(stepListener)) { client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(listeners.acquire(createRepoResponse -> {})); @@ -1429,7 +1445,7 @@ public void testFullSnapshotUnassignedShards() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, randomIdentifier()) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()) .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @Override @@ -1480,7 +1496,7 @@ public void testSnapshotNameAlreadyInUseExceptionLogging() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(l) ) @@ -1488,7 +1504,7 @@ public void testSnapshotNameAlreadyInUseExceptionLogging() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @Override @@ -1507,7 +1523,7 @@ public void onFailure(Exception e) { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCloneSnapshot(repoName, snapshotName, snapshotName) + .prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName, snapshotName) .setIndices("*") .execute(new ActionListener<>() { @Override @@ -1557,7 +1573,7 @@ public void testIndexNotFoundExceptionLogging() { .newForked( l -> client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(l) @@ -1566,7 +1582,7 @@ public void testIndexNotFoundExceptionLogging() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, randomIdentifier()) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()) .setIndices(indexName) .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @@ -1609,7 +1625,7 @@ public void testIllegalArgumentExceptionLogging() { .newForked( l -> client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(l) @@ -1618,7 +1634,7 @@ public void testIllegalArgumentExceptionLogging() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, randomIdentifier()) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()) .setFeatureStates("none", "none") .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @@ -1663,7 +1679,7 @@ private SubscribableListener createRepoAndIndex(String repo client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(createRepositoryListener); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java 
b/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java index ffbc7e62f1ca8..4eba899a4fb15 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java @@ -43,14 +43,14 @@ public void testSerialization() throws IOException { } public void testCreateSnapshotRequestDescrptions() { - CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(); + CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT); createSnapshotRequest.snapshot("snapshot_name"); createSnapshotRequest.repository("repo_name"); assertEquals("snapshot [repo_name:snapshot_name]", createSnapshotRequest.getDescription()); } public void testRestoreSnapshotRequestDescrptions() { - RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(); + RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT); restoreSnapshotRequest.snapshot("snapshot_name"); restoreSnapshotRequest.repository("repo_name"); assertEquals("snapshot [repo_name:snapshot_name]", restoreSnapshotRequest.getDescription()); diff --git a/test/external-modules/jvm-crash/build.gradle b/test/external-modules/jvm-crash/build.gradle new file mode 100644 index 0000000000000..7269f6aa9b995 --- /dev/null +++ b/test/external-modules/jvm-crash/build.gradle @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +import org.elasticsearch.gradle.internal.info.BuildParams + +apply plugin: 'elasticsearch.internal-java-rest-test' +// Necessary to use tests in Serverless +apply plugin: 'elasticsearch.internal-test-artifact' + +group = 'org.elasticsearch.plugin' + +esplugin { + description 'A test module that can trigger a JVM crash' + classname 'org.elasticsearch.test.jvm_crash.JvmCrashPlugin' +} + +tasks.named('javaRestTest') { + usesDefaultDistribution() + it.onlyIf("snapshot build") { BuildParams.isSnapshotBuild() } +} diff --git a/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java new file mode 100644 index 0000000000000..3e73310ee824f --- /dev/null +++ b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.test.jvm_crash; + +import org.apache.lucene.util.Constants; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.AbstractLocalClusterSpecBuilder; +import org.elasticsearch.test.cluster.local.DefaultEnvironmentProvider; +import org.elasticsearch.test.cluster.local.DefaultLocalClusterFactory; +import org.elasticsearch.test.cluster.local.DefaultLocalElasticsearchCluster; +import org.elasticsearch.test.cluster.local.DefaultSettingsProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.local.distribution.LocalDistributionResolver; +import org.elasticsearch.test.cluster.local.distribution.ReleasedDistributionResolver; +import org.elasticsearch.test.cluster.local.distribution.SnapshotDistributionResolver; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matcher; +import org.junit.AfterClass; +import org.junit.ClassRule; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.matchesRegex; +import static org.hamcrest.Matchers.not; + +public class JvmCrashIT extends ESRestTestCase { + + private static class StdOutCatchingClusterBuilder extends AbstractLocalClusterSpecBuilder { + + private StdOutCatchingClusterBuilder() { + this.settings(new DefaultSettingsProvider()); + this.environment(new DefaultEnvironmentProvider()); + } + + @Override + public ElasticsearchCluster build() { + // redirect stdout before the nodes start up + // they are referenced directly by ProcessUtils, so can't be changed afterwards + redirectStdout(); + + return new DefaultLocalElasticsearchCluster<>( + this::buildClusterSpec, + new DefaultLocalClusterFactory( + new LocalDistributionResolver(new SnapshotDistributionResolver(new ReleasedDistributionResolver())) + ) + ); + } + } + + private static PrintStream originalOut; + private static ByteArrayOutputStream stdOutput; + + private static void redirectStdout() { + if (originalOut == null) { + originalOut = System.out; + stdOutput = new ByteArrayOutputStream(); + // this duplicates the crash messages, but not the log output. That's ok. 
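+ // TeePrintStream (see below) duplicates every write to both the original
+ // stdout and the in-memory buffer, so the JVM's fatal-error banner can later
+ // be matched against stdOutput without losing the normal console output.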
+ System.setOut(new TeePrintStream(originalOut, stdOutput)); + } + } + + @ClassRule + public static ElasticsearchCluster cluster = new StdOutCatchingClusterBuilder().distribution(DistributionType.INTEG_TEST) + .nodes(1) + .module("test-jvm-crash") + .setting("xpack.security.enabled", "false") + .jvmArg("-Djvm.crash=true") + .build(); + + @AfterClass + public static void resetStdout() { + if (originalOut != null) { + System.setOut(originalOut); + } + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testJvmCrash() throws Exception { + final long pid = getElasticsearchPid(); + assertJvmArgs(pid, containsString("-Djvm.crash=true")); + + expectThrows(IOException.class, () -> client().performRequest(new Request("GET", "/_crash"))); + + // the Elasticsearch process should die + assertBusy(() -> assertJvmArgs(pid, not(containsString("-Djvm.crash=true")))); + + // parse the logs and ensure that Elasticsearch died with the expected cause + assertThat( + stdOutput, + hasToString( + matchesRegex( + Pattern.compile(".*# A fatal error has been detected by the Java Runtime Environment:.*SIGSEGV.*", Pattern.DOTALL) + ) + ) + ); + } + + private Process startJcmd(long pid) throws IOException { + final String jcmdPath = PathUtils.get(System.getProperty("java.home"), "bin/jcmd").toString(); + return new ProcessBuilder().command(jcmdPath, Long.toString(pid), "VM.command_line").redirectErrorStream(true).start(); + } + + private void assertJvmArgs(long pid, Matcher matcher) throws IOException, InterruptedException { + Process jcmdProcess = startJcmd(pid); + + if (Constants.WINDOWS) { + // jcmd on windows appears to have a subtle bug where if the process being connected to + // dies while jcmd is running, it can hang indefinitely. 
Here we detect this case by + // waiting a fixed amount of time, and then killing/retrying the process + boolean exited = jcmdProcess.waitFor(10, TimeUnit.SECONDS); + if (exited == false) { + logger.warn("jcmd hung, killing process and retrying"); + jcmdProcess.destroyForcibly(); + jcmdProcess = startJcmd(pid); + } + } + + List outputLines = readLines(jcmdProcess.getInputStream()); + + String jvmArgs = outputLines.stream().filter(l -> l.startsWith("jvm_args")).findAny().orElse(null); + try { + assertThat(jvmArgs, matcher); + } catch (AssertionError ae) { + logger.error("Failed matcher for jvm pid " + pid); + logger.error("jcmd output: " + String.join("\n", outputLines)); + throw ae; + } + } + + private long getElasticsearchPid() throws IOException { + Response response = client().performRequest(new Request("GET", "/_nodes/process")); + @SuppressWarnings("unchecked") + var nodesInfo = (Map) entityAsMap(response).get("nodes"); + @SuppressWarnings("unchecked") + var nodeInfo = (Map) nodesInfo.values().iterator().next(); + @SuppressWarnings("unchecked") + var processInfo = (Map) nodeInfo.get("process"); + Object stringPid = processInfo.get("id"); + return Long.parseLong(stringPid.toString()); + } + + private List readLines(InputStream is) throws IOException { + try (BufferedReader in = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))) { + return in.lines().toList(); + } + } + + @Override + protected boolean preserveClusterUponCompletion() { + // as the cluster is dead, its state cannot be wiped successfully, so we have to bypass wiping the cluster + return true; + } +} diff --git a/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/TeePrintStream.java b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/TeePrintStream.java new file mode 100644 index 0000000000000..9593dbf387d62 --- /dev/null +++ b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/TeePrintStream.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.test.jvm_crash; + +import java.io.OutputStream; +import java.io.PrintStream; + +/** + * Copies output to another {@code PrintStream}, as well as an {@code OutputStream} + */ +class TeePrintStream extends PrintStream { + private final PrintStream delegate; + + TeePrintStream(PrintStream delegate, OutputStream out) { + super(out); + this.delegate = delegate; + } + + @Override + public void flush() { + delegate.flush(); + super.flush(); + } + + @Override + public void close() { + delegate.close(); + super.close(); + } + + @Override + public boolean checkError() { + return delegate.checkError() || super.checkError(); + } + + @Override + public void write(int b) { + delegate.write(b); + super.write(b); + } + + @Override + public void write(byte[] buf, int off, int len) { + delegate.write(buf, off, len); + super.write(buf, off, len); + } + + @Override + public void print(boolean b) { + delegate.print(b); + super.print(b); + } + + @Override + public void print(char c) { + delegate.print(c); + super.print(c); + } + + @Override + public void print(int i) { + delegate.print(i); + super.print(i); + } + + @Override + public void print(long l) { + delegate.print(l); + super.print(l); + } + + @Override + public void print(float f) { + delegate.print(f); + super.print(f); + } + + @Override + public void print(double d) { + delegate.print(d); + super.print(d); + } + + @Override + public void print(char[] s) { + delegate.print(s); + super.print(s); + } + + @Override + public void print(String s) { + delegate.print(s); + super.print(s); + } + + @Override + public void print(Object obj) { + delegate.print(obj); + super.print(obj); + } + + @Override + public void println() { + delegate.println(); + super.println(); + } +} diff --git a/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/JvmCrashPlugin.java b/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/JvmCrashPlugin.java new file mode 100644 index 0000000000000..2bc4d969cba59 --- /dev/null +++ b/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/JvmCrashPlugin.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.test.jvm_crash; + +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; + +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class JvmCrashPlugin extends Plugin implements ActionPlugin { + @Override + public List getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new RestJvmCrashAction()); + } +} diff --git a/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/RestJvmCrashAction.java b/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/RestJvmCrashAction.java new file mode 100644 index 0000000000000..ec9fceaea9c6f --- /dev/null +++ b/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/RestJvmCrashAction.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.jvm_crash; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestJvmCrashAction implements RestHandler { + + // Turns out, it's actually quite hard to get the JVM to crash... 
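+ // The approach used below: reflectively obtain sun.misc.Unsafe and call
+ // freeMemory(1L). Address 1 was never returned by allocateMemory, so the
+ // native allocator is handed a bogus pointer; in practice this brings the
+ // whole JVM down with the SIGSEGV fatal error that JvmCrashIT asserts on,
+ // rather than throwing a catchable Java exception.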
+ private static Method FREE_MEMORY; + private static Object UNSAFE; + static { + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + Class unsafe = Class.forName("sun.misc.Unsafe"); + + FREE_MEMORY = unsafe.getMethod("freeMemory", long.class); + Field f = unsafe.getDeclaredField("theUnsafe"); + f.setAccessible(true); + UNSAFE = f.get(null); + return null; + }); + } catch (Exception e) { + throw new AssertionError(e); + } + } + + RestJvmCrashAction() {} + + @Override + public List routes() { + return List.of(new Route(GET, "/_crash")); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + // BIG BADDA BOOM + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> FREE_MEMORY.invoke(UNSAFE, 1L)); + } catch (Exception e) { + throw new AssertionError(e); + } + } +} diff --git a/test/external-modules/jvm-crash/src/main/plugin-metadata/plugin-security.policy b/test/external-modules/jvm-crash/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..860ae72b058db --- /dev/null +++ b/test/external-modules/jvm-crash/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,6 @@ +grant { + // various permissions to fiddle with Unsafe + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; +}; diff --git a/test/fixtures/krb5kdc-fixture/build.gradle b/test/fixtures/krb5kdc-fixture/build.gradle index c671d58e1e395..733bfd1d4bd29 100644 --- a/test/fixtures/krb5kdc-fixture/build.gradle +++ b/test/fixtures/krb5kdc-fixture/build.gradle @@ -22,7 +22,7 @@ dockerFixtures { configurations { all { - transitive = false + exclude group: 'org.hamcrest', module: 'hamcrest-core' } krb5ConfHdfsFile { canBeConsumed = true @@ -36,21 +36,18 @@ configurations { dependencies { testImplementation project(':test:framework') - api "junit:junit:${versions.junit}" api project(':test:fixtures:testcontainer-utils') - api "org.testcontainers:testcontainers:${versions.testcontainer}" - implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + api("org.testcontainers:testcontainers:${versions.testcontainer}") { + transitive = false + } + implementation("com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"){ + transitive = false + } implementation "org.slf4j:slf4j-api:${versions.slf4j}" - implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" + // implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" - runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" - runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" - runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" - runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" - // ensure we have proper logging during when used in tests runtimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" runtimeOnly "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 0486022620398..a6b737f162547 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -815,7 +815,7 @@ private void roundTripSyntheticSource(DocumentMapper mapper, String syntheticSou } } - private static String syntheticSource(DocumentMapper mapper, IndexReader reader, int docId) throws IOException { + protected static String syntheticSource(DocumentMapper mapper, IndexReader reader, int docId) throws IOException { LeafReader leafReader = getOnlyLeafReader(reader); final String synthetic1; diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index 97f17858e753d..c6c9f5b727980 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -470,7 +470,7 @@ private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) // create repo assertAcked( - clusterAdmin().preparePutRepository(REPO_NAME) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPO_NAME) .setType("fs") .setSettings( Settings.builder() @@ -481,7 +481,7 @@ private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) ); // create snapshot - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, SNAP_NAME) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, SNAP_NAME) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -492,7 +492,7 @@ private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) ); assertThat( - clusterAdmin().prepareGetSnapshots(REPO_NAME).setSnapshots(SNAP_NAME).get().getSnapshots().get(0).state(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME).setSnapshots(SNAP_NAME).get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS) ); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index 7cdeaeedfdeaf..faada33eade83 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -71,7 +71,7 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { deleteAndAssertEmpty(getRepository().basePath()); - clusterAdmin().prepareDeleteRepository(TEST_REPO_NAME).get(); + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME).get(); super.tearDown(); } @@ -105,10 +105,11 @@ public void testCreateSnapshot() { final String snapshotName = "test-snap-" + System.currentTimeMillis(); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, snapshotName) - .setWaitForCompletion(true) - .setIndices("test-idx-*", "-test-idx-3") - .get(); + CreateSnapshotResponse 
createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME, + snapshotName + ).setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat( createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -116,11 +117,16 @@ public void testCreateSnapshot() { ); assertThat( - clusterAdmin().prepareGetSnapshots(TEST_REPO_NAME).setSnapshots(snapshotName).get().getSnapshots().get(0).state(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0) + .state(), equalTo(SnapshotState.SUCCESS) ); - assertTrue(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, snapshotName).get().isAcknowledged()); + assertTrue(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, snapshotName).get().isAcknowledged()); } public void testListChildren() { @@ -178,10 +184,11 @@ public void testCleanup() throws Exception { final String snapshotName = "test-snap-" + System.currentTimeMillis(); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, snapshotName) - .setWaitForCompletion(true) - .setIndices("test-idx-*", "-test-idx-3") - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME, + snapshotName + ).setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat( createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -189,7 +196,12 @@ public void testCleanup() throws Exception { ); assertThat( - clusterAdmin().prepareGetSnapshots(TEST_REPO_NAME).setSnapshots(snapshotName).get().getSnapshots().get(0).state(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0) + .state(), equalTo(SnapshotState.SUCCESS) ); @@ -201,7 +213,7 @@ public void testCleanup() throws Exception { createDanglingIndex(repo, genericExec); logger.info("--> deleting a snapshot to trigger repository cleanup"); - clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, snapshotName).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, snapshotName).get(); BlobStoreTestUtil.assertConsistency(repo); @@ -209,7 +221,11 @@ public void testCleanup() throws Exception { createDanglingIndex(repo, genericExec); logger.info("--> Execute repository cleanup"); - final CleanupRepositoryResponse response = clusterAdmin().prepareCleanupRepository(TEST_REPO_NAME).get(); + final CleanupRepositoryResponse response = clusterAdmin().prepareCleanupRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME + ).get(); assertCleanupResponse(response, 3L, 1L); } @@ -228,23 +244,35 @@ public void testIndexLatest() throws Exception { final var repository = getRepository(); final var blobContents = new HashSet(); - final var createSnapshot1Response = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, randomIdentifier()) + final var createSnapshot1Response = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, randomIdentifier()) .setWaitForCompletion(true) .get(); assertTrue(blobContents.add(readIndexLatest(repository))); - clusterAdmin().prepareGetSnapshots(TEST_REPO_NAME).get(); + 
clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME).get(); assertFalse(blobContents.add(readIndexLatest(repository))); - final var createSnapshot2Response = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, randomIdentifier()) + final var createSnapshot2Response = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, randomIdentifier()) .setWaitForCompletion(true) .get(); assertTrue(blobContents.add(readIndexLatest(repository))); - assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, createSnapshot1Response.getSnapshotInfo().snapshotId().getName())); + assertAcked( + clusterAdmin().prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME, + createSnapshot1Response.getSnapshotInfo().snapshotId().getName() + ) + ); assertTrue(blobContents.add(readIndexLatest(repository))); - assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, createSnapshot2Response.getSnapshotInfo().snapshotId().getName())); + assertAcked( + clusterAdmin().prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME, + createSnapshot2Response.getSnapshotInfo().snapshotId().getName() + ) + ); assertTrue(blobContents.add(readIndexLatest(repository))); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index b1765218ff7f2..6951e1941686d 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -101,7 +101,12 @@ protected final String createRepository(final String name, final boolean verify) protected final String createRepository(final String name, final Settings settings, final boolean verify) { logger.info("--> creating repository [name: {}, verify: {}, settings: {}]", name, verify, settings); - assertAcked(clusterAdmin().preparePutRepository(name).setType(repositoryType()).setVerify(verify).setSettings(settings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name) + .setType(repositoryType()) + .setVerify(verify) + .setSettings(settings) + ); internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { assertThat(repositories.repository(name), notNullValue()); @@ -116,7 +121,7 @@ protected final String createRepository(final String name, final Settings settin protected final void deleteRepository(final String name) { logger.debug("--> deleting repository [name: {}]", name); - assertAcked(clusterAdmin().prepareDeleteRepository(name)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name)); internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { RepositoryMissingException e = expectThrows(RepositoryMissingException.class, () -> repositories.repository(name)); assertThat(e.repository(), equalTo(name)); @@ -316,7 +321,9 @@ protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) t final String snapshotName = randomName(); logger.info("--> create snapshot {}:{}", repoName, snapshotName); assertSuccessfulSnapshot( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(indexNames) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, 
repoName, snapshotName) + .setWaitForCompletion(true) + .setIndices(indexNames) ); List deleteIndices = randomSubsetOf(randomIntBetween(0, indexCount), indexNames); @@ -351,7 +358,9 @@ protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) t } logger.info("--> restore all indices from the snapshot"); - assertSuccessfulRestore(clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(true)); + assertSuccessfulRestore( + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).setWaitForCompletion(true) + ); // higher timeout since we can have quite a few shards and a little more data here ensureGreen(TimeValue.timeValueSeconds(120)); @@ -361,15 +370,23 @@ protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) t } logger.info("--> delete snapshot {}:{}", repoName, snapshotName); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).get()); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots(repoName).setSnapshots(snapshotName).get()); + expectThrows( + SnapshotMissingException.class, + () -> clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotName).get() + ); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get()); + expectThrows( + SnapshotMissingException.class, + () -> clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).get() + ); expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get() + () -> clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(randomBoolean()) + .get() ); } @@ -395,7 +412,9 @@ public void testMultipleSnapshotAndRollback() throws Exception { docCounts[i] = (int) SearchResponseUtils.getTotalHitsValue(prepareSearch(indexName).setSize(0)); logger.info("--> create snapshot {}:{} with {} documents", repoName, snapshotName + "-" + i, docCounts[i]); assertSuccessfulSnapshot( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName + "-" + i).setWaitForCompletion(true).setIndices(indexName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName + "-" + i) + .setWaitForCompletion(true) + .setIndices(indexName) ); } @@ -411,7 +430,8 @@ public void testMultipleSnapshotAndRollback() throws Exception { logger.info("--> restore index from the snapshot"); assertSuccessfulRestore( - clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName + "-" + iterationToRestore).setWaitForCompletion(true) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName + "-" + iterationToRestore) + .setWaitForCompletion(true) ); ensureGreen(); @@ -420,7 +440,7 @@ public void testMultipleSnapshotAndRollback() throws Exception { for (int i = 0; i < iterationCount; i++) { logger.info("--> delete snapshot {}:{}", repoName, snapshotName + "-" + i); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName + "-" + i).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName + "-" + i).get()); } } @@ -451,7 +471,7 @@ public void testIndicesDeletedFromRepository() throws Exception { logger.info("--> take a snapshot"); CreateSnapshotResponse 
createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap") .setWaitForCompletion(true) .get(); assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards()); @@ -466,14 +486,14 @@ public void testIndicesDeletedFromRepository() throws Exception { logger.info("--> take another snapshot with only 2 of the 3 indices"); createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, "test-snap2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap2") .setWaitForCompletion(true) .setIndices("test-idx-1", "test-idx-2") .get(); assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards()); logger.info("--> delete a snapshot"); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, "test-snap").get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap").get()); logger.info("--> verify index folder deleted from blob container"); RepositoriesService repositoriesSvc = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); @@ -493,7 +513,7 @@ public void testIndicesDeletedFromRepository() throws Exception { } } - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, "test-snap2").get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap2").get()); } public void testBlobStoreBulkDeletion() throws Exception { @@ -541,7 +561,7 @@ public void testDanglingShardLevelBlobCleanup() throws Exception { SnapshotState.SUCCESS, client.admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-1") .setWaitForCompletion(true) .get() .getSnapshotInfo() @@ -571,14 +591,14 @@ public void testDanglingShardLevelBlobCleanup() throws Exception { ); final var snapshot2Info = client.admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2") .setWaitForCompletion(true) .get() .getSnapshotInfo(); assertEquals(SnapshotState.SUCCESS, snapshot2Info.state()); // Delete the first snapshot, which should leave only the blobs from snapshot-2 - assertAcked(client.admin().cluster().prepareDeleteSnapshot(repoName, "snapshot-1")); + assertAcked(client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-1")); // Retrieve the blobs actually present final var actualBlobs = shardContainer.listBlobs(randomPurpose()) @@ -616,7 +636,7 @@ public void testDanglingShardLevelBlobCleanup() throws Exception { ); } - assertAcked(client.admin().cluster().prepareDeleteSnapshot(repoName, "snapshot-2")); + assertAcked(client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2")); } protected void addRandomDocuments(String name, int numDocs) throws InterruptedException { diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java index 43b0fb7025bd8..b028659eb8d46 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java @@ -51,11 +51,13 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce final String snapshotName = randomName(); logger.info("--> create snapshot {}:{}", repoName, snapshotName); assertSuccessfulSnapshot( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(indexName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .setIndices(indexName) ); assertAcked(client().admin().indices().prepareDelete(indexName)); - assertAcked(clusterAdmin().prepareDeleteRepository(repoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); final Path deletedPath; try (Stream contents = Files.list(repoPath.resolve("indices"))) { @@ -69,7 +71,9 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce final ElasticsearchException exception = expectThrows( ElasticsearchException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get() + () -> clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(randomBoolean()) + .get() ); assertThat(exception.getRootCause(), instanceOf(NoSuchFileException.class)); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index 22bbfad3cfb70..a79ba296e7554 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -169,15 +169,19 @@ public final void testSnapshotWithLargeSegmentFiles() throws Exception { assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); final String snapshot = "snapshot"; - assertSuccessfulSnapshot(clusterAdmin().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index)); + assertSuccessfulSnapshot( + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); assertAcked(client().admin().indices().prepareDelete(index)); - assertSuccessfulRestore(clusterAdmin().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + assertSuccessfulRestore( + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true) + ); ensureGreen(index); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).get()); } public void testRequestStats() throws Exception { @@ -198,15 +202,19 @@ public void testRequestStats() throws Exception { assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); final String snapshot = "snapshot"; - assertSuccessfulSnapshot(clusterAdmin().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index)); + assertSuccessfulSnapshot( + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, 
repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); assertAcked(client().admin().indices().prepareDelete(index)); - assertSuccessfulRestore(clusterAdmin().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + assertSuccessfulRestore( + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true) + ); ensureGreen(index); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).get()); final RepositoryStats repositoryStats = StreamSupport.stream( internalCluster().getInstances(RepositoriesService.class).spliterator(), diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 2e7ce0400d78b..23ea4cc95fa35 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -144,11 +144,11 @@ public void verifyNoLeakedListeners() throws Exception { @After public void assertRepoConsistency() { if (skipRepoConsistencyCheckReason == null) { - clusterAdmin().prepareGetRepositories().get().repositories().forEach(repositoryMetadata -> { + clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT).get().repositories().forEach(repositoryMetadata -> { final String name = repositoryMetadata.name(); if (repositoryMetadata.settings().getAsBoolean(READONLY_SETTING_KEY, false) == false) { - clusterAdmin().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); - clusterAdmin().prepareCleanupRepository(name).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name).get(); } BlobStoreTestUtil.assertConsistency(getRepositoryOnMaster(name)); }); @@ -318,7 +318,12 @@ protected void createRepository(String repoName, String type, Settings.Builder s public static void createRepository(Logger logger, String repoName, String type, Settings.Builder settings, boolean verify) { logger.info("--> creating or updating repository [{}] [{}]", repoName, type); - assertAcked(clusterAdmin().preparePutRepository(repoName).setVerify(verify).setType(type).setSettings(settings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) + .setVerify(verify) + .setType(type) + .setSettings(settings) + ); } protected void createRepository(String repoName, String type, Settings.Builder settings) { @@ -342,7 +347,7 @@ public static void createRepository(Logger logger, String repoName, String type) } protected void deleteRepository(String repoName) { - assertAcked(clusterAdmin().prepareDeleteRepository(repoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); } public static Settings.Builder randomRepositorySettings() { @@ -381,10 +386,11 @@ protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) t protected String initWithSnapshotVersion(String repoName, Path repoPath, IndexVersion version) throws Exception { assertThat("This hack only works on an empty repository", 
getRepositoryData(repoName).getSnapshotIds(), empty()); final String oldVersionSnapshot = OLD_VERSION_SNAPSHOT_PREFIX + version.id(); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, oldVersionSnapshot) - .setIndices("does-not-exist-for-sure-*") - .setWaitForCompletion(true) - .get(); + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + oldVersionSnapshot + ).setIndices("does-not-exist-for-sure-*").setWaitForCompletion(true).get(); final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); assertThat(snapshotInfo.totalShards(), is(0)); @@ -442,7 +448,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, IndexVe final RepositoryMetadata repoMetadata = blobStoreRepository.getMetadata(); if (BlobStoreRepository.CACHE_REPOSITORY_DATA.get(repoMetadata.settings())) { logger.info("--> recreating repository to clear caches"); - assertAcked(clusterAdmin().prepareDeleteRepository(repoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); createRepository(repoName, repoMetadata.type(), Settings.builder().put(repoMetadata.settings())); } return oldVersionSnapshot; @@ -454,7 +460,7 @@ protected SnapshotInfo createFullSnapshot(String repoName, String snapshotName) public static SnapshotInfo createFullSnapshot(Logger logger, String repoName, String snapshotName) { logger.info("--> creating full snapshot [{}] in [{}]", snapshotName, repoName); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIncludeGlobalState(true) .setWaitForCompletion(true) .get(); @@ -466,7 +472,7 @@ public static SnapshotInfo createFullSnapshot(Logger logger, String repoName, St protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, List indices, List featureStates) { logger.info("--> creating snapshot [{}] of {} in [{}]", snapshot, indices, repositoryName); - final CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(repositoryName, snapshot) + final CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot) .setIndices(indices.toArray(Strings.EMPTY_ARRAY)) .setWaitForCompletion(true) .setFeatureStates(featureStates.toArray(Strings.EMPTY_ARRAY)) @@ -604,7 +610,10 @@ public static ActionFuture startFullSnapshot( boolean partial ) { logger.info("--> creating full snapshot [{}] to repo [{}]", snapshotName, repoName); - return clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setPartial(partial).execute(); + return clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .setPartial(partial) + .execute(); } protected void awaitNumberOfSnapshotsInProgress(int count) throws Exception { @@ -642,12 +651,15 @@ protected void createIndexWithContent(String indexName, Settings indexSettings) protected ActionFuture startDeleteSnapshot(String repoName, String snapshotName) { logger.info("--> deleting snapshot [{}] from repo [{}]", snapshotName, repoName); - return clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).execute(); + return clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 374854626703d..73acdb6e19d4b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
@@ -218,22 +218,25 @@ public void wipeRepositories(String...
repositories) { for (String repository : repositories) { ActionListener.run( listeners.acquire(), - l -> client().admin().cluster().prepareDeleteRepository(repository).execute(new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse acknowledgedResponse) { - l.onResponse(null); - } - - @Override - public void onFailure(Exception e) { - if (e instanceof RepositoryMissingException) { - // ignore + l -> client().admin() + .cluster() + .prepareDeleteRepository(ESTestCase.TEST_REQUEST_TIMEOUT, ESTestCase.TEST_REQUEST_TIMEOUT, repository) + .execute(new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { l.onResponse(null); - } else { - l.onFailure(e); } - } - }) + + @Override + public void onFailure(Exception e) { + if (e instanceof RepositoryMissingException) { + // ignore + l.onResponse(null); + } else { + l.onFailure(e); + } + } + }) ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 3f0f6c91443ad..9e88d66c430e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -2024,7 +2024,7 @@ protected static void registerRepository(RestClient restClient, String repositor final Request request = newXContentRequest( HttpMethod.PUT, "/_snapshot/" + repository, - new PutRepositoryRequest(repository).type(type).settings(settings) + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repository).type(type).settings(settings) ); request.addParameter("verify", Boolean.toString(verify)); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java index cdcc0e495582a..9fe7fc647455e 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java @@ -29,9 +29,11 @@ import org.elasticsearch.index.fielddata.IndexHistogramFieldData; import org.elasticsearch.index.fielddata.LeafHistogramFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.CompositeSyntheticFieldLoader; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.IgnoreMalformedStoredValues; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.SourceLoader; @@ -44,6 +46,7 @@ import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.sort.BucketedSort; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xcontent.CopyingXContentParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -287,8 +290,12 @@ protected boolean supportsParsingObject() { @Override public void parse(DocumentParserContext context) throws IOException { context.path().add(simpleName()); + + boolean shouldStoreMalformedDataForSyntheticSource = 
context.mappingLookup().isSourceSynthetic() && ignoreMalformed(); XContentParser.Token token; XContentSubParser subParser = null; + XContentBuilder malformedDataForSyntheticSource = null; + try { token = context.parser().currentToken(); if (token == XContentParser.Token.VALUE_NULL) { @@ -299,10 +306,16 @@ public void parse(DocumentParserContext context) throws IOException { ArrayList counts = null; // should be an object ensureExpectedToken(XContentParser.Token.START_OBJECT, token, context.parser()); - subParser = new XContentSubParser(context.parser()); + if (shouldStoreMalformedDataForSyntheticSource) { + var copyingParser = new CopyingXContentParser(context.parser()); + malformedDataForSyntheticSource = copyingParser.getBuilder(); + subParser = new XContentSubParser(copyingParser); + } else { + subParser = new XContentSubParser(context.parser()); + } token = subParser.nextToken(); while (token != XContentParser.Token.END_OBJECT) { - // should be an field + // should be a field ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, subParser); String fieldName = subParser.currentName(); if (fieldName.equals(VALUES_FIELD.getPreferredName())) { @@ -427,7 +440,17 @@ public void parse(DocumentParserContext context) throws IOException { if (subParser != null) { // close the subParser so we advance to the end of the object subParser.close(); + } else if (shouldStoreMalformedDataForSyntheticSource) { + // We have a malformed value, but it's not an object given that `subParser` is null. + // So we just remember whatever it is. + malformedDataForSyntheticSource = XContentBuilder.builder(context.parser().contentType().xContent()) + .copyCurrentStructure(context.parser()); + } + + if (malformedDataForSyntheticSource != null) { + context.doc().add(IgnoreMalformedStoredValues.storedField(name(), malformedDataForSyntheticSource)); } + context.addIgnoredField(fieldType().name()); } context.path().remove(); @@ -491,76 +514,85 @@ protected SyntheticSourceMode syntheticSourceMode() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - if (ignoreMalformed.value()) { - throw new IllegalArgumentException( - "field [" + name() + "] of type [histogram] doesn't support synthetic source because it ignores malformed histograms" - ); - } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( "field [" + name() + "] of type [histogram] doesn't support synthetic source because it declares copy_to" ); } - return new SourceLoader.SyntheticFieldLoader() { - private final InternalHistogramValue value = new InternalHistogramValue(); - private BytesRef binaryValue; - @Override - public Stream> storedFieldLoaders() { - return Stream.of(); - } + return new CompositeSyntheticFieldLoader( + simpleName(), + name(), + new HistogramSyntheticFieldLoader(), + new CompositeSyntheticFieldLoader.MalformedValuesLayer(name()) + ); + } - @Override - public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { - BinaryDocValues docValues = leafReader.getBinaryDocValues(fieldType().name()); - if (docValues == null) { - // No values in this leaf - binaryValue = null; - return null; - } - return docId -> { - if (docValues.advanceExact(docId)) { - binaryValue = docValues.binaryValue(); - return true; - } - binaryValue = null; - return false; - }; - } + private class HistogramSyntheticFieldLoader implements CompositeSyntheticFieldLoader.SyntheticFieldLoaderLayer { + private final InternalHistogramValue value = new InternalHistogramValue(); + 
private BytesRef binaryValue; - @Override - public boolean hasValue() { - return binaryValue != null; - } + @Override + public Stream> storedFieldLoaders() { + return Stream.of(); + } - @Override - public void write(XContentBuilder b) throws IOException { - if (binaryValue == null) { - return; + @Override + public SourceLoader.SyntheticFieldLoader.DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) + throws IOException { + BinaryDocValues docValues = leafReader.getBinaryDocValues(fieldType().name()); + if (docValues == null) { + // No values in this leaf + binaryValue = null; + return null; + } + return docId -> { + if (docValues.advanceExact(docId)) { + binaryValue = docValues.binaryValue(); + return true; } - b.startObject(simpleName()); + binaryValue = null; + return false; + }; + } - value.reset(binaryValue); - b.startArray("values"); - while (value.next()) { - b.value(value.value()); - } - b.endArray(); + @Override + public boolean hasValue() { + return binaryValue != null; + } - value.reset(binaryValue); - b.startArray("counts"); - while (value.next()) { - b.value(value.count()); - } - b.endArray(); + @Override + public void write(XContentBuilder b) throws IOException { + if (binaryValue == null) { + return; + } + b.startObject(); - b.endObject(); + value.reset(binaryValue); + b.startArray("values"); + while (value.next()) { + b.value(value.value()); } + b.endArray(); - @Override - public String fieldName() { - return name(); + value.reset(binaryValue); + b.startArray("counts"); + while (value.next()) { + b.value(value.count()); } - }; - } + b.endArray(); + + b.endObject(); + } + + @Override + public String fieldName() { + return name(); + } + + @Override + public long valueCount() { + return binaryValue != null ? 1 : 0; + } + }; } diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapperTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapperTests.java index 5e2bdaf2d465e..6fcbf20b8657f 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapperTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapperTests.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.analytics.mapper; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.MappedFieldType; @@ -15,6 +17,7 @@ import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.analytics.AnalyticsPlugin; import org.junit.AssumptionViolatedException; @@ -26,7 +29,6 @@ import java.util.Map; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.matchesPattern; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -121,9 +123,44 @@ protected boolean supportsIgnoreMalformed() { @Override protected List exampleMalformedValues() { + var randomString = randomAlphaOfLengthBetween(1, 10); + var randomLong = randomLong(); + var randomDouble = randomDouble(); + var randomBoolean = randomBoolean(); + return List.of( + exampleMalformedValue(b -> 
b.value(randomString)).errorMatches( + "Failed to parse object: expecting token of type [START_OBJECT]" + ), + exampleMalformedValue(b -> b.value(randomLong)).errorMatches("Failed to parse object: expecting token of type [START_OBJECT]"), + exampleMalformedValue(b -> b.value(randomDouble)).errorMatches( + "Failed to parse object: expecting token of type [START_OBJECT]" + ), + exampleMalformedValue(b -> b.value(randomBoolean)).errorMatches( + "Failed to parse object: expecting token of type [START_OBJECT]" + ), + exampleMalformedValue(b -> b.startObject().endObject()).errorMatches("expected field called [values]"), exampleMalformedValue(b -> b.startObject().startArray("values").value(2).value(2).endArray().endObject()).errorMatches( "expected field called [counts]" + ), + exampleMalformedValue(b -> b.startObject().startArray("counts").value(2).value(2).endArray().endObject()).errorMatches( + "expected field called [values]" + ), + // Make sure that entire sub-object is preserved in synthetic source + exampleMalformedValue( + b -> b.startObject() + .startArray("values") + .value(2) + .endArray() + .field("somefield", randomString) + .array("somearray", randomLong, randomLong) + .startObject("someobject") + .field("nestedfield", randomDouble) + .endObject() + .endObject() + ).errorMatches("unknown parameter [somefield]"), + exampleMalformedValue(b -> b.startArray().value(randomLong).value(randomLong).endArray()).errorMatches( + "expecting token of type [START_OBJECT] but found [VALUE_NUMBER]" ) ); }
@@ -336,13 +373,44 @@ protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + public void testArrayValueSyntheticSource() throws Exception { + DocumentMapper mapper = createDocumentMapper( + syntheticSourceFieldMapping(b -> b.field("type", "histogram").field("ignore_malformed", "true")) + ); + + var randomString = randomAlphaOfLength(10); + CheckedConsumer<XContentBuilder, IOException> arrayValue = b -> { + b.startArray("field"); + { + b.startObject().field("counts", new int[] { 1, 2, 3 }).field("values", new double[] { 1, 2, 3 }).endObject(); + b.startObject().field("counts", new int[] { 4, 5, 6 }).field("values", new double[] { 4, 5, 6 }).endObject(); + b.value(randomString); + } + b.endArray(); + }; + + var expected = JsonXContent.contentBuilder().startObject(); + // First value comes from synthetic field loader and so is formatted in a specific format (e.g. values always come first). + // Other values are stored as is as part of ignore_malformed logic for synthetic source. + { + expected.startArray("field"); + expected.startObject().field("values", new double[] { 1, 2, 3 }).field("counts", new int[] { 1, 2, 3 }).endObject(); + expected.startObject().field("counts", new int[] { 4, 5, 6 }).field("values", new double[] { 4, 5, 6 }).endObject(); + expected.value(randomString); + expected.endArray(); + } + expected.endObject(); + + var syntheticSource = syntheticSource(mapper, arrayValue); + assertEquals(Strings.toString(expected), syntheticSource); + } + @Override protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { - assumeFalse("synthetic _source support for histogram doesn't support ignore_malformed", ignoreMalformed); - return new HistogramFieldSyntheticSourceSupport(); + return new HistogramFieldSyntheticSourceSupport(ignoreMalformed); } - private static class HistogramFieldSyntheticSourceSupport implements SyntheticSourceSupport { + private record HistogramFieldSyntheticSourceSupport(boolean ignoreMalformed) implements SyntheticSourceSupport { @Override public SyntheticSourceExample example(int maxVals) { if (randomBoolean()) {
@@ -371,21 +439,14 @@ private int randomCount() { private void mapping(XContentBuilder b) throws IOException { b.field("type", "histogram"); + if (ignoreMalformed) { + b.field("ignore_malformed", true); + } } @Override public List<SyntheticSourceInvalidExample> invalidExample() throws IOException { - return List.of( - new SyntheticSourceInvalidExample( - matchesPattern( - "field \\[field] of type \\[histogram] doesn't support synthetic source because it ignores malformed histograms" - ), - b -> { - b.field("type", "histogram"); - b.field("ignore_malformed", true); - } - ) - ); + return List.of(); } } }
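The test above pins down the user-visible contract of ignore_malformed under synthetic source for histogram fields: the single value that was actually indexed is rebuilt from doc values, so its keys come back in a normalized order ("values" before "counts"), while everything the parser rejected, including the extra array entries a single-valued histogram field cannot index, is replayed verbatim from the stored malformed-value field. That is why only the first object in the expected source above is reordered. A sketch of the round trip under such a mapping (illustrative values, not captured from a real run):

    indexed _source:   {"field": [{"counts": [1, 2], "values": [3.0, 4.0]}, "not a histogram"]}
    synthetic _source: {"field": [{"values": [3.0, 4.0], "counts": [1, 2]}, "not a histogram"]}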
diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingSnapshotsIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingSnapshotsIT.java index bb7eba340c0ad..22210361f4430 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingSnapshotsIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingSnapshotsIT.java
@@ -36,7 +36,11 @@ public class AutoscalingSnapshotsIT extends AutoscalingIntegTestCase { public void setup() throws Exception { Path location = randomRepoPath(); logger.info("--> creating repository [{}] [{}]", REPO, "fs"); - assertAcked(clusterAdmin().preparePutRepository(REPO).setType("fs").setSettings(Settings.builder().put("location", location))); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPO) + .setType("fs") + .setSettings(Settings.builder().put("location", location)) + ); } public void testAutoscalingPolicyWillNotBeRestored() {
@@ -47,7 +51,7 @@ public void testAutoscalingPolicyWillNotBeRestored() { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(REPO, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIncludeGlobalState(true) .get();
@@ -71,7 +75,7 @@ public void testAutoscalingPolicyWillNotBeRestored() { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, SNAPSHOT) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setRestoreGlobalState(true) .get();
diff --git 
a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 90bbc29a11b41..423d555de9eab 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -154,7 +154,11 @@ public void testThatRepositoryRecoversEmptyIndexBasedOnLeaderSettings() throws I Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -225,7 +229,11 @@ public void testDocsAreRecovered() throws Exception { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -292,7 +300,11 @@ public void testRateLimitingIsEmployed() throws Exception { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -357,7 +369,11 @@ public void testIndividualActionsTimeout() throws Exception { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -420,7 +436,11 @@ public void testFollowerMappingIsUpdated() throws IOException { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + 
TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -573,7 +593,8 @@ public void testCcrRepositoryFetchesSnapshotShardSizeFromIndexShardStoreStats() }; clusterService.addListener(listener); - final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderCluster, CcrRepository.LATEST).indices(leaderIndex) + final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, leaderCluster, CcrRepository.LATEST) + .indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index a0b25faea9256..716554eb3927c 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -151,7 +151,7 @@ private RestoreSnapshotRequest setUpRestoreSnapshotRequest( final Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - return new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indexSettings(settingsBuilder) + return new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, leaderClusterRepoName, CcrRepository.LATEST).indexSettings(settingsBuilder) .indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 560c98fbd210b..a74aa4c323426 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -187,13 +187,14 @@ private void createFollowerIndex( .build(); final String leaderClusterRepoName = CcrRepository.NAME_PREFIX + request.getRemoteCluster(); - final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices( - request.getLeaderIndex() - ) + final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + request.masterNodeTimeout(), + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(request.getLeaderIndex()) .indicesOptions(request.indicesOptions()) .renamePattern("^(.*)$") .renameReplacement(Matcher.quoteReplacement(request.getFollowerIndex())) - .masterNodeTimeout(request.masterNodeTimeout()) .indexSettings(overrideSettings) .quiet(true); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java index c2b5080aa16c1..5c02288e704f7 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java @@ -356,7 +356,7 @@ private IndexRequestBuilder[] 
snapshotAndRestore(final String sourceIdx, final b assertFalse(clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2").get().isTimedOut()); logger.info("--> restore the index and ensure all shards are allocated"); - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repo, snapshot) + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) .setWaitForCompletion(true) .setIndices(sourceIdx) .get(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java index 1377e9103b00b..3be40e5a1550d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java @@ -50,8 +50,7 @@ void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentCl } getClient().admin() .cluster() - .prepareDeleteSnapshot(repositoryName, snapshotName) - .setMasterNodeTimeout(TimeValue.MAX_VALUE) + .prepareDeleteSnapshot(TimeValue.MAX_VALUE, repositoryName, snapshotName) .execute(new ActionListener<>() { @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java index 7d6397b7c96dd..070bc804f3279 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java @@ -100,13 +100,12 @@ void createSnapshot(IndexMetadata indexMetadata, ActionListener listene ); return; } - CreateSnapshotRequest request = new CreateSnapshotRequest(snapshotRepository, snapshotName); + CreateSnapshotRequest request = new CreateSnapshotRequest(TimeValue.MAX_VALUE, snapshotRepository, snapshotName); request.indices(indexName); // this is safe as the snapshot creation will still be async, it's just that the listener will be notified when the snapshot is // complete request.waitForCompletion(true); request.includeGlobalState(false); - request.masterNodeTimeout(TimeValue.MAX_VALUE); getClient().admin().cluster().createSnapshot(request, listener.map(response -> { logger.debug( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java index 9c24324f706ca..7ce81fa90a557 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java @@ -104,7 +104,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, ); String snapshotName = snapPolicyMeta.getLastSuccess().getSnapshotName(); String repositoryName = snapPolicyMeta.getPolicy().getRepository(); - GetSnapshotsRequest request = new GetSnapshotsRequest().repositories(repositoryName) + GetSnapshotsRequest request = new GetSnapshotsRequest(TimeValue.MAX_VALUE).repositories(repositoryName) .snapshots(new String[] { snapshotName }) .includeIndexNames(true) .verbose(false); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 046ccc3037a05..09a49c53ee1a5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java
@@ -40,6 +40,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.XContentBuilder;
@@ -399,6 +400,24 @@ public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { Map<String, Object> transformedSource = filter(result.v2(), filter, 0); XContentBuilder xContentBuilder = XContentBuilder.builder(result.v1().xContent()).map(transformedSource); visitor.binaryField(fieldInfo, BytesReference.toBytes(BytesReference.bytes(xContentBuilder))); + } else if (IgnoredSourceFieldMapper.NAME.equals(fieldInfo.name)) { + // for _ignored_source, parse, filter out the field and its contents, and serialize back downstream + IgnoredSourceFieldMapper.MappedNameValue mappedNameValue = IgnoredSourceFieldMapper.decodeAsMap(value); + Map<String, Object> transformedField = filter(mappedNameValue.map(), filter, 0); + if (transformedField.isEmpty() == false) { + // The unfiltered map contains at least one element, the field name with its value. If the field contains + // an object or an array, the value of the first element is a map or a list, respectively. Otherwise, + // it's a single leaf value, e.g. a string or a number. + var topValue = mappedNameValue.map().values().iterator().next(); + if (topValue instanceof Map || topValue instanceof List) { + // The field contains an object or an array, reconstruct it from the transformed map in case + // any subfield has been filtered out. + visitor.binaryField(fieldInfo, IgnoredSourceFieldMapper.encodeFromMap(mappedNameValue, transformedField)); + } else { + // The field contains a leaf value, and it hasn't been filtered out. It is safe to propagate the original value. + visitor.binaryField(fieldInfo, value); + } + } } else { visitor.binaryField(fieldInfo, value); }
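The new _ignored_source branch above extends field-level security to synthetic-source leftovers. Each stored entry decodes to a single-key map that is passed through the same filter as _source; the entry is then dropped entirely (its field is disallowed), re-encoded (objects and arrays, whose children may have been pruned by the filter), or propagated as the original bytes (leaf values, which survive all-or-nothing). For instance, under a filter that only permits obj.fieldC, a hypothetical entry holding {"obj": {"fieldC": "testC", "fieldX": "x"}} would be re-encoded as {"obj": {"fieldC": "testC"}}, while an entry holding the leaf {"fieldA": "testA"} is kept byte-for-byte or removed outright; the integration test added further down exercises exactly these cases.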
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index 4dc0ea1d77e42..fb892a318f07c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java
@@ -256,8 +256,8 @@ private Map<String, Object> addPolicyNameToMetadata(final Map<String, Object> me * Generate a new create snapshot request from this policy. The name of the snapshot is * generated at this time based on any date math expressions in the "name" field. */ - public CreateSnapshotRequest toRequest() { - CreateSnapshotRequest req = new CreateSnapshotRequest(repository, generateSnapshotName(this.name)); + public CreateSnapshotRequest toRequest(TimeValue masterNodeTimeout) { + CreateSnapshotRequest req = new CreateSnapshotRequest(masterNodeTimeout, repository, generateSnapshotName(this.name)); Map<String, Object> mergedConfiguration = configuration == null ? new HashMap<>() : new HashMap<>(configuration); @SuppressWarnings("unchecked") Map<String, Object> metadata = (Map<String, Object>) mergedConfiguration.get("metadata");
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index c2d51680c3146..560dee9b5843c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java
@@ -48,6 +48,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automata;
@@ -59,12 +60,16 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.IOUtils; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; +import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
@@ -86,7 +91,7 @@ import static org.hamcrest.Matchers.equalTo; /** Simple tests for this filterreader */ -public class FieldSubsetReaderTests extends ESTestCase { +public class FieldSubsetReaderTests extends MapperServiceTestCase { /** * test filtering two string fields
@@ -711,6 +716,127 @@ public void testSourceFilteringIntegration() throws Exception { IOUtils.close(ir, iw, dir); } + public void testIgnoredSourceFilteringIntegration() throws Exception { + DocumentMapper mapper = createMapperService( + Settings.builder() + .put("index.mapping.total_fields.limit", 1) + .put("index.mapping.total_fields.ignore_dynamic_beyond_limit", true) + .build(), + syntheticSourceMapping(b -> { + b.startObject("foo").field("type", "keyword").endObject(); + }) + ).documentMapper(); + + try (Directory directory = newDirectory()) { + RandomIndexWriter iw = indexWriterForSyntheticSource(directory); + ParsedDocument doc = mapper.parse(source(b -> { + b.field("fieldA", "testA"); + b.field("fieldB", "testB"); + b.startObject("obj").field("fieldC", "testC").endObject(); + b.startArray("arr").startObject().field("fieldD", "testD").endObject().endArray(); + })); + doc.updateSeqID(0, 0); + doc.version().setLongValue(0); + iw.addDocuments(doc.docs()); + iw.close(); + + { + Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", IgnoredSourceFieldMapper.NAME)); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new 
CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals("{\"fieldA\":\"testA\"}", syntheticSource); + } + } + + { + Automaton automaton = Operations.minus( + Automata.makeAnyString(), + Automatons.patterns("fieldA"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT + ); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"arr":[{"fieldD":"testD"}],"fieldB":"testB","obj":{"fieldC":"testC"}}""", syntheticSource); + } + } + + { + Automaton automaton = Automatons.patterns(Arrays.asList("obj.fieldC", IgnoredSourceFieldMapper.NAME)); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"obj":{"fieldC":"testC"}}""", syntheticSource); + } + } + + { + Automaton automaton = Operations.minus( + Automata.makeAnyString(), + Automatons.patterns("obj.fieldC"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT + ); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"arr":[{"fieldD":"testD"}],"fieldA":"testA","fieldB":"testB"}""", syntheticSource); + } + } + + { + Automaton automaton = Automatons.patterns(Arrays.asList("arr.fieldD", IgnoredSourceFieldMapper.NAME)); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"arr":[{"fieldD":"testD"}]}""", syntheticSource); + } + } + + { + Automaton automaton = Operations.minus( + Automata.makeAnyString(), + Automatons.patterns("arr.fieldD"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT + ); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"arr":[{}],"fieldA":"testA","fieldB":"testB","obj":{"fieldC":"testC"}}""", syntheticSource); + } + } + } + } + public void testSourceFiltering() { // include on top-level value Map map = new HashMap<>(); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/connector-secrets.json b/x-pack/plugin/core/template-resources/src/main/resources/connector-secrets.json index 96fa641726fa3..40666ea28097b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/connector-secrets.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/connector-secrets.json @@ -3,7 +3,6 @@ "index": { "auto_expand_replicas": "0-1", "number_of_shards": 1, - "number_of_replicas": 0, "priority": 100, "refresh_interval": "1s" } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/analytics/behavioral_analytics-events-settings.json 
b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/analytics/behavioral_analytics-events-settings.json index 9aa9731be6524..70ba4c3664588 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/analytics/behavioral_analytics-events-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/analytics/behavioral_analytics-events-settings.json @@ -8,7 +8,6 @@ }, "codec": "best_compression", "number_of_shards": 1, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "final_pipeline": "behavioral_analytics-events-final_pipeline", "sort": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json index 22f35b3ac5c99..6ff9510574281 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json @@ -3,8 +3,7 @@ "settings": { "hidden": true, "number_of_shards": "1", - "auto_expand_replicas": "0-1", - "number_of_replicas": "0" + "auto_expand_replicas": "0-1" } }, "_meta": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json index 0e82cc0f2a6df..4d307949c18db 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json @@ -8,7 +8,6 @@ "order": 100, "settings": { "number_of_shards": 1, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "index.priority": 10, "index.format": 1 diff --git a/x-pack/plugin/core/template-resources/src/main/resources/ilm-history.json b/x-pack/plugin/core/template-resources/src/main/resources/ilm-history.json index e549d3bb3d168..4f47d579f7eb4 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/ilm-history.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/ilm-history.json @@ -8,7 +8,6 @@ "template": { "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1" }, "lifecycle": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json index 4e77d35b4de25..7d13712e9c371 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json @@ -4,7 +4,6 @@ "settings": { "index": { "number_of_shards": 1, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "format": 7, "codec": "best_compression" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json index d8d911a31baa9..50a5e7c15022a 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json @@ -3,7 +3,6 @@ "version": ${xpack.monitoring.template.release.version}, "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, 
"index.auto_expand_replicas": "0-1", "index.format": 7, "index.codec": "best_compression" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json index ad631b8e5762c..0e0a6f14e5206 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json @@ -3,7 +3,6 @@ "version": ${xpack.monitoring.template.release.version}, "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.format": 7, "index.codec": "best_compression" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json index ae48f9f552b51..e11627f93650d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json @@ -3,7 +3,6 @@ "version": ${xpack.monitoring.template.release.version}, "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.format": 7, "index.codec": "best_compression" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json index 70ffedb3f5462..9b90f97682306 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json @@ -3,7 +3,6 @@ "settings": { "index": { "number_of_shards": 4, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "max_result_window": 150000, "refresh_interval": "10s", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json index 0f1c24d96c092..f1e5e01d50c16 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json @@ -2,7 +2,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json index 353411ed80b2e..3d5e5d0fdc9b7 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json @@ -2,7 +2,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "sort": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json 
b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json index e933aa117a6b3..35f53a36b2d0b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json @@ -2,7 +2,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "sort": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json index c28a548f95418..9e8a344d23959 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json @@ -3,7 +3,6 @@ "settings": { "index": { "number_of_shards": 16, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json index 470edd710136d..6c96fb21673ae 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json @@ -3,7 +3,6 @@ "settings": { "index": { "number_of_shards": 16, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json index 48b88492a777d..9271718bd27ed 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json @@ -3,7 +3,6 @@ "settings": { "index": { "number_of_shards": "16", - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json index 72d8cf6e1dfc2..7e7229e83c823 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json @@ -5,7 +5,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json index 6f32af12c84bf..71c4d15989b7a 100644 --- 
a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json
@@ -5,7 +5,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json index d3c5b0af215e6..20849bfe8f27d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json
@@ -5,7 +5,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/slm-history.json b/x-pack/plugin/core/template-resources/src/main/resources/slm-history.json index c154cdfe19d66..e3b13f3f8c841 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/slm-history.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/slm-history.json
@@ -9,7 +9,6 @@ "template": { "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1" }, "lifecycle": {
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json b/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json index d8b5ea75d88c4..2eed69c7c58e6 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json
@@ -7,7 +7,6 @@ "template": { "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.hidden": true, "index.format": 6
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json b/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json index 79b0c6fb228bd..19e4dc022daa1 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json
@@ -7,7 +7,6 @@ "template": { "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.lifecycle.name": "watch-history-ilm-policy-16", "index.hidden": true,
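Every template hunk above makes the same fix: an explicit "number_of_replicas": 0 is dropped wherever "auto_expand_replicas": "0-1" is already set. With auto-expansion enabled the replica count is derived from the number of data nodes (0 on a single-node cluster, otherwise 1 for the "0-1" range), so the static value was redundant and only obscured the effective setting. A minimal sketch of the surviving shape of these settings blocks, expressed with the Settings builder used elsewhere in this patch:

    // auto-expansion supersedes a static replica count, so only it remains
    Settings indexSettings = Settings.builder()
        .put("index.number_of_shards", 1)
        .put("index.auto_expand_replicas", "0-1")
        .build();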
diff --git a/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDataLoader.java b/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDataLoader.java index 9fa0320527f93..7c0b70c658e7a 100644 --- a/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDataLoader.java +++ b/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDataLoader.java
@@ -21,6 +21,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath;
@@ -68,13 +69,16 @@ static Properties loadConfiguration() throws IOException { } } + // timeout is not rendered in the JSON so doesn't matter + private static final TimeValue DUMMY_TIMEOUT = TimeValue.THIRTY_SECONDS; + static void restoreSnapshot(RestClient client, Properties cfg) throws IOException { int status = client.performRequest(new Request("HEAD", "/" + cfg.getProperty("index_name"))).getStatusLine().getStatusCode(); if (status == 404) { Request createRepo = new Request("PUT", "/_snapshot/" + cfg.getProperty("gcs_repo_name")); createRepo.setJsonEntity( Strings.toString( - new PutRepositoryRequest().type("gcs") + new PutRepositoryRequest(DUMMY_TIMEOUT, DUMMY_TIMEOUT).type("gcs") .settings( Settings.builder() .put("bucket", cfg.getProperty("gcs_bucket_name"))
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java index 2ef0b892138de..e5c2cedfd087b 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java
@@ -6,10 +6,15 @@ */ package org.elasticsearch.xpack.esql.core.expression.function.scalar; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; +import java.io.IOException; import java.util.List; import static java.util.Collections.singletonList;
@@ -18,16 +23,21 @@ public abstract class UnaryScalarFunction extends ScalarFunction { private final Expression field; - protected UnaryScalarFunction(Source source) { - super(source); - this.field = null; - } - protected UnaryScalarFunction(Source source, Expression field) { super(source, singletonList(field)); this.field = field; } + protected UnaryScalarFunction(StreamInput in) throws IOException { + this(Source.readFrom((StreamInput & PlanStreamInput) in), ((PlanStreamInput) in).readExpression()); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(field); + } + @Override public final UnaryScalarFunction replaceChildren(List<Expression> newChildren) { return replaceChild(newChildren.get(0));
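The UnaryScalarFunction change above supplies the serialization plumbing that the Not, IsNotNull, and IsNull hunks below plug into: the base class now reads its Source and child expression from the stream and writes both back in writeTo, so a concrete subclass only has to register a NamedWriteableRegistry.Entry and delegate its stream constructor. A sketch of the resulting subclass boilerplate, using a hypothetical MyFunc in place of the real classes below:

    public class MyFunc extends UnaryScalarFunction {
        // maps the wire name to the deserializing constructor
        public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MyFunc", MyFunc::new);

        private MyFunc(StreamInput in) throws IOException {
            super(in); // base class restores source() and field()
        }

        @Override
        public String getWriteableName() {
            return ENTRY.name;
        }
    }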
org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; @@ -14,15 +16,27 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.io.IOException; + import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isBoolean; public class Not extends UnaryScalarFunction implements Negatable { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Not", Not::new); public Not(Source source, Expression child) { super(source, child); } + private Not(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Not::new, field()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNotNull.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNotNull.java index 52375c5db01a1..e365480a6fd79 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNotNull.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNotNull.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.nulls; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; @@ -16,12 +18,28 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.io.IOException; + public class IsNotNull extends UnaryScalarFunction implements Negatable { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "IsNotNull", + IsNotNull::new + ); public IsNotNull(Source source, Expression field) { super(source, field); } + private IsNotNull(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, IsNotNull::new, field()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNull.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNull.java index d52eec9114df6..8b6eb5d4404b0 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNull.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNull.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.nulls; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import 
org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; @@ -16,12 +18,24 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.io.IOException; + public class IsNull extends UnaryScalarFunction implements Negatable { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "IsNull", IsNull::new); public IsNull(Source source, Expression field) { super(source, field); } + private IsNull(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, IsNull::new, field()); diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index 92071543aa27e..dbec0963d1aab 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -28,7 +28,9 @@ dependencies { // Also contains a dummy processor to allow compilation with unused annotations. annotationProcessor project('compute:gen') - testImplementation project('qa:testFixtures') + testImplementation(project('qa:testFixtures')) { + exclude(group:"org.elasticsearch.plugin", module: "esql") + } testImplementation project(':test:framework') testImplementation(testArtifact(project(xpackModule('core')))) testImplementation project(path: xpackModule('enrich')) diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index bc206ee1d78d6..c7fa29c6a91f0 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -410,6 +410,11 @@ tasks.named('stringTemplates').configure { it.inputFile = stateInputFile it.outputFile = "org/elasticsearch/compute/aggregation/LongState.java" } + template { + it.properties = floatProperties + it.inputFile = stateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/FloatState.java" + } template { it.properties = doubleProperties it.inputFile = stateInputFile @@ -463,6 +468,11 @@ tasks.named('stringTemplates').configure { it.inputFile = arrayStateInputFile it.outputFile = "org/elasticsearch/compute/aggregation/DoubleArrayState.java" } + template { + it.properties = floatProperties + it.inputFile = arrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/FloatArrayState.java" + } File valuesAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-ValuesAggregator.java.st") template { it.properties = intProperties @@ -474,6 +484,11 @@ tasks.named('stringTemplates').configure { it.inputFile = valuesAggregatorInputFile it.outputFile = "org/elasticsearch/compute/aggregation/ValuesLongAggregator.java" } + template { + it.properties = floatProperties + it.inputFile = valuesAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/ValuesFloatAggregator.java" + } template { it.properties = doubleProperties it.inputFile = valuesAggregatorInputFile @@ -496,6 +511,11 @@ tasks.named('stringTemplates').configure { it.inputFile = rateAggregatorInputFile it.outputFile = "org/elasticsearch/compute/aggregation/RateLongAggregator.java" } + template { + it.properties = floatProperties + it.inputFile = rateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/RateFloatAggregator.java" + } template { it.properties = doubleProperties it.inputFile = rateAggregatorInputFile @@ -514,6 +534,11 @@ tasks.named('stringTemplates').configure { 
it.inputFile = topListAggregatorInputFile it.outputFile = "org/elasticsearch/compute/aggregation/TopListLongAggregator.java" } + template { + it.properties = floatProperties + it.inputFile = topListAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/TopListFloatAggregator.java" + } template { it.properties = doubleProperties it.inputFile = topListAggregatorInputFile @@ -667,6 +692,11 @@ tasks.named('stringTemplates').configure { it.inputFile = bucketedSortInputFile it.outputFile = "org/elasticsearch/compute/data/sort/LongBucketedSort.java" } + template { + it.properties = floatProperties + it.inputFile = bucketedSortInputFile + it.outputFile = "org/elasticsearch/compute/data/sort/FloatBucketedSort.java" + } template { it.properties = doubleProperties it.inputFile = bucketedSortInputFile diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index d3fe51b4cc225..1127d4b4ccb72 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -44,6 +44,8 @@ import static org.elasticsearch.compute.gen.Types.DOUBLE_VECTOR; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.ELEMENT_TYPE; +import static org.elasticsearch.compute.gen.Types.FLOAT_BLOCK; +import static org.elasticsearch.compute.gen.Types.FLOAT_VECTOR; import static org.elasticsearch.compute.gen.Types.INTERMEDIATE_STATE_DESC; import static org.elasticsearch.compute.gen.Types.INT_BLOCK; import static org.elasticsearch.compute.gen.Types.INT_VECTOR; @@ -136,6 +138,8 @@ static String valueType(ExecutableElement init, ExecutableElement combine) { switch (initReturn) { case "double": return "double"; + case "float": + return "float"; case "long": return "long"; case "int": @@ -151,6 +155,7 @@ static ClassName valueBlockType(ExecutableElement init, ExecutableElement combin return switch (valueType(init, combine)) { case "boolean" -> BOOLEAN_BLOCK; case "double" -> DOUBLE_BLOCK; + case "float" -> FLOAT_BLOCK; case "long" -> LONG_BLOCK; case "int" -> INT_BLOCK; case "org.apache.lucene.util.BytesRef" -> BYTES_REF_BLOCK; @@ -162,6 +167,7 @@ static ClassName valueVectorType(ExecutableElement init, ExecutableElement combi return switch (valueType(init, combine)) { case "boolean" -> BOOLEAN_VECTOR; case "double" -> DOUBLE_VECTOR; + case "float" -> FLOAT_VECTOR; case "long" -> LONG_VECTOR; case "int" -> INT_VECTOR; case "org.apache.lucene.util.BytesRef" -> BYTES_REF_VECTOR; @@ -445,6 +451,8 @@ private String primitiveStateMethod() { return "longValue"; case "org.elasticsearch.compute.aggregation.DoubleState": return "doubleValue"; + case "org.elasticsearch.compute.aggregation.FloatState": + return "floatValue"; default: throw new IllegalArgumentException( "don't know how to fetch primitive values from " + stateType + ". define combineIntermediate." 
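For orientation, the `float` branches added to `valueType`, `valueBlockType` and `valueVectorType` above are what let the annotation processor accept aggregator definitions whose `init`/`combine` operate on `float`. A minimal sketch of such a definition, modeled on the existing double aggregators — the class name and intermediate-state type strings here are illustrative, not part of this change:

```java
import org.elasticsearch.compute.ann.Aggregator;
import org.elasticsearch.compute.ann.IntermediateState;

// Illustrative generator input: `init` returning float is what drives
// valueType(...), which now maps to FLOAT_BLOCK / FLOAT_VECTOR.
@Aggregator({ @IntermediateState(name = "max", type = "FLOAT"), @IntermediateState(name = "seen", type = "BOOLEAN") })
class MaxFloatAggregator {
    public static float init() {
        return -Float.MAX_VALUE;
    }

    public static float combine(float current, float v) {
        return Math.max(current, v); // wrapped in a FloatState by the generator
    }
}
```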
@@ -495,6 +503,9 @@ private void primitiveStateToResult(MethodSpec.Builder builder) { case "org.elasticsearch.compute.aggregation.DoubleState": builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1)"); return; + case "org.elasticsearch.compute.aggregation.FloatState": + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantFloatBlockWith(state.floatValue(), 1)"); + return; default: throw new IllegalArgumentException("don't know how to convert state to result: " + stateType); } @@ -521,7 +532,7 @@ private MethodSpec close() { private boolean hasPrimitiveState() { return switch (stateType.toString()) { case "org.elasticsearch.compute.aggregation.IntState", "org.elasticsearch.compute.aggregation.LongState", - "org.elasticsearch.compute.aggregation.DoubleState" -> true; + "org.elasticsearch.compute.aggregation.DoubleState", "org.elasticsearch.compute.aggregation.FloatState" -> true; default -> false; }; } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index cb65d2337d588..c9cdcfe42fddd 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -585,7 +585,7 @@ private MethodSpec close() { private boolean hasPrimitiveState() { return switch (stateType.toString()) { case "org.elasticsearch.compute.aggregation.IntArrayState", "org.elasticsearch.compute.aggregation.LongArrayState", - "org.elasticsearch.compute.aggregation.DoubleArrayState" -> true; + "org.elasticsearch.compute.aggregation.DoubleArrayState", "org.elasticsearch.compute.aggregation.FloatArrayState" -> true; default -> false; }; } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java index 741a1294e6fb5..6f98f1f797ab0 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java @@ -211,6 +211,7 @@ static String vectorAccessorName(String elementTypeName) { case "INT" -> "getInt"; case "LONG" -> "getLong"; case "DOUBLE" -> "getDouble"; + case "FLOAT" -> "getFloat"; case "BYTES_REF" -> "getBytesRef"; default -> throw new IllegalArgumentException( "don't know how to fetch primitive values from " + elementTypeName + ". define combineIntermediate." 
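Together with the `floatValue` accessor case and the `FloatState` addition to `hasPrimitiveState`, the `newConstantFloatBlockWith` branch above gives float-typed aggregators the same single-value fast path as int, long and double. Roughly, the generated `evaluateFinal` for a `FloatState`-backed function would come out as follows (a hypothetical excerpt of generator output, shown for orientation only):

```java
// Hypothetical generated output once FloatState is treated as a primitive
// state; `state` is the generated AggregatorFunction's FloatState field.
@Override
public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) {
    // Emitted by the new FloatState case in primitiveStateToResult():
    blocks[offset] = driverContext.blockFactory().newConstantFloatBlockWith(state.floatValue(), 1);
}
```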
diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java index 6618d9e4f41b5..3150741ddcb05 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Types.java @@ -41,12 +41,14 @@ public class Types { static final ClassName INT_BLOCK = ClassName.get(DATA_PACKAGE, "IntBlock"); static final ClassName LONG_BLOCK = ClassName.get(DATA_PACKAGE, "LongBlock"); static final ClassName DOUBLE_BLOCK = ClassName.get(DATA_PACKAGE, "DoubleBlock"); + static final ClassName FLOAT_BLOCK = ClassName.get(DATA_PACKAGE, "FloatBlock"); static final ClassName BOOLEAN_BLOCK_BUILDER = BOOLEAN_BLOCK.nestedClass("Builder"); static final ClassName BYTES_REF_BLOCK_BUILDER = BYTES_REF_BLOCK.nestedClass("Builder"); static final ClassName INT_BLOCK_BUILDER = INT_BLOCK.nestedClass("Builder"); static final ClassName LONG_BLOCK_BUILDER = LONG_BLOCK.nestedClass("Builder"); static final ClassName DOUBLE_BLOCK_BUILDER = DOUBLE_BLOCK.nestedClass("Builder"); + static final ClassName FLOAT_BLOCK_BUILDER = FLOAT_BLOCK.nestedClass("Builder"); static final ClassName ELEMENT_TYPE = ClassName.get(DATA_PACKAGE, "ElementType"); @@ -55,35 +57,41 @@ public class Types { static final ClassName INT_VECTOR = ClassName.get(DATA_PACKAGE, "IntVector"); static final ClassName LONG_VECTOR = ClassName.get(DATA_PACKAGE, "LongVector"); static final ClassName DOUBLE_VECTOR = ClassName.get(DATA_PACKAGE, "DoubleVector"); + static final ClassName FLOAT_VECTOR = ClassName.get(DATA_PACKAGE, "FloatVector"); static final ClassName BOOLEAN_VECTOR_BUILDER = ClassName.get(DATA_PACKAGE, "BooleanVector", "Builder"); static final ClassName BYTES_REF_VECTOR_BUILDER = ClassName.get(DATA_PACKAGE, "BytesRefVector", "Builder"); static final ClassName INT_VECTOR_BUILDER = ClassName.get(DATA_PACKAGE, "IntVector", "Builder"); static final ClassName LONG_VECTOR_BUILDER = ClassName.get(DATA_PACKAGE, "LongVector", "Builder"); static final ClassName DOUBLE_VECTOR_BUILDER = ClassName.get(DATA_PACKAGE, "DoubleVector", "Builder"); + static final ClassName FLOAT_VECTOR_BUILDER = ClassName.get(DATA_PACKAGE, "FloatVector", "Builder"); static final ClassName BOOLEAN_VECTOR_FIXED_BUILDER = ClassName.get(DATA_PACKAGE, "BooleanVector", "FixedBuilder"); static final ClassName INT_VECTOR_FIXED_BUILDER = ClassName.get(DATA_PACKAGE, "IntVector", "FixedBuilder"); static final ClassName LONG_VECTOR_FIXED_BUILDER = ClassName.get(DATA_PACKAGE, "LongVector", "FixedBuilder"); static final ClassName DOUBLE_VECTOR_FIXED_BUILDER = ClassName.get(DATA_PACKAGE, "DoubleVector", "FixedBuilder"); + static final ClassName FLOAT_VECTOR_FIXED_BUILDER = ClassName.get(DATA_PACKAGE, "FloatVector", "FixedBuilder"); static final ClassName BOOLEAN_ARRAY_VECTOR = ClassName.get(DATA_PACKAGE, "BooleanArrayVector"); static final ClassName BYTES_REF_ARRAY_VECTOR = ClassName.get(DATA_PACKAGE, "BytesRefArrayVector"); static final ClassName INT_ARRAY_VECTOR = ClassName.get(DATA_PACKAGE, "IntArrayVector"); static final ClassName LONG_ARRAY_VECTOR = ClassName.get(DATA_PACKAGE, "LongArrayVector"); static final ClassName DOUBLE_ARRAY_VECTOR = ClassName.get(DATA_PACKAGE, "DoubleArrayVector"); + static final ClassName FLOAT_ARRAY_VECTOR = ClassName.get(DATA_PACKAGE, "FloatArrayVector"); static final ClassName BOOLEAN_ARRAY_BLOCK = ClassName.get(DATA_PACKAGE, 
"BooleanArrayBlock"); static final ClassName BYTES_REF_ARRAY_BLOCK = ClassName.get(DATA_PACKAGE, "BytesRefArrayBlock"); static final ClassName INT_ARRAY_BLOCK = ClassName.get(DATA_PACKAGE, "IntArrayBlock"); static final ClassName LONG_ARRAY_BLOCK = ClassName.get(DATA_PACKAGE, "LongArrayBlock"); static final ClassName DOUBLE_ARRAY_BLOCK = ClassName.get(DATA_PACKAGE, "DoubleArrayBlock"); + static final ClassName FLOAT_ARRAY_BLOCK = ClassName.get(DATA_PACKAGE, "FloatArrayBlock"); static final ClassName BOOLEAN_CONSTANT_VECTOR = ClassName.get(DATA_PACKAGE, "ConstantBooleanVector"); static final ClassName BYTES_REF_CONSTANT_VECTOR = ClassName.get(DATA_PACKAGE, "ConstantBytesRefVector"); static final ClassName INT_CONSTANT_VECTOR = ClassName.get(DATA_PACKAGE, "ConstantIntVector"); static final ClassName LONG_CONSTANT_VECTOR = ClassName.get(DATA_PACKAGE, "ConstantLongVector"); static final ClassName DOUBLE_CONSTANT_VECTOR = ClassName.get(DATA_PACKAGE, "ConstantDoubleVector"); + static final ClassName FLOAT_CONSTANT_VECTOR = ClassName.get(DATA_PACKAGE, "ConstantFloatVector"); static final ClassName AGGREGATOR_FUNCTION = ClassName.get(AGGREGATION_PACKAGE, "AggregatorFunction"); static final ClassName AGGREGATOR_FUNCTION_SUPPLIER = ClassName.get(AGGREGATION_PACKAGE, "AggregatorFunctionSupplier"); @@ -162,6 +170,9 @@ static ClassName blockType(String elementType) { if (elementType.equalsIgnoreCase(TypeName.DOUBLE.toString())) { return DOUBLE_BLOCK; } + if (elementType.equalsIgnoreCase(TypeName.FLOAT.toString())) { + return FLOAT_BLOCK; + } throw new IllegalArgumentException("unknown vector type for [" + elementType + "]"); } @@ -181,6 +192,9 @@ static ClassName vectorType(TypeName elementType) { if (elementType.equals(TypeName.DOUBLE)) { return DOUBLE_VECTOR; } + if (elementType.equals(TypeName.FLOAT)) { + return FLOAT_VECTOR; + } throw new IllegalArgumentException("unknown vector type for [" + elementType + "]"); } @@ -200,6 +214,9 @@ static ClassName vectorType(String elementType) { if (elementType.equalsIgnoreCase(TypeName.DOUBLE.toString())) { return DOUBLE_VECTOR; } + if (elementType.equalsIgnoreCase(TypeName.FLOAT.toString())) { + return FLOAT_VECTOR; + } throw new IllegalArgumentException("unknown vector type for [" + elementType + "]"); } @@ -234,6 +251,12 @@ static ClassName builderType(TypeName resultType) { if (resultType.equals(DOUBLE_VECTOR)) { return DOUBLE_VECTOR_BUILDER; } + if (resultType.equals(FLOAT_BLOCK)) { + return FLOAT_BLOCK_BUILDER; + } + if (resultType.equals(FLOAT_VECTOR)) { + return FLOAT_VECTOR_BUILDER; + } throw new IllegalArgumentException("unknown builder type for [" + resultType + "]"); } @@ -250,6 +273,9 @@ static ClassName vectorFixedBuilderType(TypeName elementType) { if (elementType.equals(TypeName.DOUBLE)) { return DOUBLE_VECTOR_FIXED_BUILDER; } + if (elementType.equals(TypeName.FLOAT)) { + return FLOAT_VECTOR_FIXED_BUILDER; + } throw new IllegalArgumentException("unknown vector fixed builder type for [" + elementType + "]"); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatArrayState.java new file mode 100644 index 0000000000000..b3767828f00db --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatArrayState.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.FloatArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of floats. It is created in a mode where it + * won't track the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + *
<p>
+ * This class is generated. Do not edit it.
+ * </p>
+ */ +final class FloatArrayState extends AbstractArrayState implements GroupingAggregatorState { + private final float init; + + private FloatArray values; + + FloatArrayState(BigArrays bigArrays, float init) { + super(bigArrays); + this.values = bigArrays.newFloatArray(1, false); + this.values.set(0, init); + this.init = init; + } + + float get(int groupId) { + return values.get(groupId); + } + + float getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, float value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds()) { + try (var builder = driverContext.blockFactory().newFloatVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendFloat(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (FloatBlock.Builder builder = driverContext.blockFactory().newFloatBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group)) { + builder.appendFloat(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= values.size()) { + long prevSize = values.size(); + values = bigArrays.grow(values, groupId + 1); + values.fill(prevSize, values.size(), init); + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 2; + try ( + var valuesBuilder = driverContext.blockFactory().newFloatBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendFloat(values.get(group)); + } else { + valuesBuilder.appendFloat(0); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java new file mode 100644 index 0000000000000..81bdd39e51b6e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/FloatState.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single float. + * This class is generated. Do not edit it. 
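A short sketch of the `FloatArrayState` semantics above, written as hypothetical direct usage (the class is package-private and is really only driven by the generated grouping functions; `BigArrays.NON_RECYCLING_INSTANCE` stands in for a properly breaker-wired instance):

```java
import org.elasticsearch.common.util.BigArrays;

// Hypothetical direct use of FloatArrayState.
void floatArrayStateSketch() {
    try (FloatArrayState state = new FloatArrayState(BigArrays.NON_RECYCLING_INSTANCE, 0f)) {
        state.set(0, 1.5f);                 // grows the backing FloatArray on demand
        state.set(2, 3.0f);                 // group 1 is never written
        assert state.get(2) == 3.0f;
        assert state.getOrDefault(1) == 0f; // in range but untouched -> init value
        assert state.getOrDefault(9) == 0f; // past the end -> init value
    }
}
```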
+ */ +final class FloatState implements AggregatorState { + private float value; + private boolean seen; + + FloatState() { + this(0); + } + + FloatState(float init) { + this.value = init; + } + + float floatValue() { + return value; + } + + void floatValue(float value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 2; + blocks[offset + 0] = driverContext.blockFactory().newConstantFloatBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateFloatAggregator.java new file mode 100644 index 0000000000000..b50b125d98331 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateFloatAggregator.java @@ -0,0 +1,341 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +import java.util.Arrays; + +/** + * A rate grouping aggregation definition for float. + * This class is generated. Edit `X-RateAggregator.java.st` instead. 
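`FloatState` above is deliberately tiny: a mutable float plus a `seen` flag, serialized as a pair of constant blocks. A sketch of the lifecycle the generated code drives (hypothetical direct usage; comments describe the two-block intermediate layout):

```java
// Hypothetical driver of the FloatState lifecycle shown above.
FloatState state = new FloatState();          // value = 0f, seen = false
state.floatValue(state.floatValue() + 2.5f);
state.seen(true);
// state.toIntermediate(blocks, offset, driverContext) then emits:
//   blocks[offset]     = ConstantFloatBlock(2.5f)   (the running value)
//   blocks[offset + 1] = ConstantBooleanBlock(true) (whether any input was seen)
state.close();                                // no-op, but keeps the contract uniform
```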
+ */ +@GroupingAggregator( + includeTimestamps = true, + value = { + @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), + @IntermediateState(name = "values", type = "FLOAT_BLOCK"), + @IntermediateState(name = "resets", type = "DOUBLE") } +) +public class RateFloatAggregator { + + public static FloatRateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new FloatRateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); + } + + public static void combine(FloatRateGroupingState current, int groupId, long timestamp, float value) { + current.append(groupId, timestamp, value); + } + + public static void combineIntermediate( + FloatRateGroupingState current, + int groupId, + LongBlock timestamps, + FloatBlock values, + double reset, + int otherPosition + ) { + current.combine(groupId, timestamps, values, reset, otherPosition); + } + + public static void combineStates( + FloatRateGroupingState current, + int currentGroupId, // make the stylecheck happy + FloatRateGroupingState otherState, + int otherGroupId + ) { + current.combineState(currentGroupId, otherState, otherGroupId); + } + + public static Block evaluateFinal(FloatRateGroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext.blockFactory()); + } + + private static class FloatRateState { + static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(FloatRateState.class); + final long[] timestamps; // descending order + final float[] values; + double reset = 0; + + FloatRateState(int initialSize) { + this.timestamps = new long[initialSize]; + this.values = new float[initialSize]; + } + + FloatRateState(long[] ts, float[] vs) { + this.timestamps = ts; + this.values = vs; + } + + private float dv(float v0, float v1) { + // counter reset detection + return v0 > v1 ? 
v1 : v1 - v0; + } + + void append(long t, float v) { + assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; + assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; + reset += dv(v, values[1]) + dv(values[1], values[0]) - dv(v, values[0]); + timestamps[1] = t; + values[1] = v; + } + + int entries() { + return timestamps.length; + } + + static long bytesUsed(int entries) { + var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Float.BYTES * entries); + return BASE_RAM_USAGE + ts + vs; + } + } + + public static final class FloatRateGroupingState implements Releasable, Accountable, GroupingAggregatorState { + private ObjectArray states; + private final long unitInMillis; + private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states + + FloatRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker, long unitInMillis) { + this.bigArrays = bigArrays; + this.breaker = breaker; + this.states = bigArrays.newObjectArray(1); + this.unitInMillis = unitInMillis; + } + + void ensureCapacity(int groupId) { + states = bigArrays.grow(states, groupId + 1); + } + + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<>"); + stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + + void append(int groupId, long timestamp, float value) { + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + adjustBreaker(FloatRateState.bytesUsed(1)); + state = new FloatRateState(new long[] { timestamp }, new float[] { value }); + states.set(groupId, state); + } else { + if (state.entries() == 1) { + adjustBreaker(FloatRateState.bytesUsed(2)); + state = new FloatRateState(new long[] { state.timestamps[0], timestamp }, new float[] { state.values[0], value }); + states.set(groupId, state); + adjustBreaker(-FloatRateState.bytesUsed(1)); // old state + } else { + state.append(timestamp, value); + } + } + } + + void combine(int groupId, LongBlock timestamps, FloatBlock values, double reset, int otherPosition) { + final int valueCount = timestamps.getValueCount(otherPosition); + if (valueCount == 0) { + return; + } + final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + ensureCapacity(groupId); + var state = states.get(groupId); + if (state == null) { + adjustBreaker(FloatRateState.bytesUsed(valueCount)); + state = new FloatRateState(valueCount); + state.reset = reset; + states.set(groupId, state); + // TODO: add bulk_copy to Block + for (int i = 0; i < valueCount; i++) { + state.timestamps[i] = timestamps.getLong(firstIndex + i); + state.values[i] = values.getFloat(firstIndex + i); + } + } else { + adjustBreaker(FloatRateState.bytesUsed(state.entries() + valueCount)); + var newState = new FloatRateState(state.entries() + valueCount); + newState.reset = state.reset + reset; + states.set(groupId, newState); + merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-FloatRateState.bytesUsed(state.entries())); // old state + } + } + + void merge(FloatRateState curr, FloatRateState dst, int firstIndex, int rightCount, LongBlock timestamps, FloatBlock values) { + int i = 0, j = 0, k = 0; + final int leftCount = curr.entries(); + while (i < leftCount && j < rightCount) { + final var t1 = curr.timestamps[i]; + final var t2 = 
timestamps.getLong(firstIndex + j); + if (t1 > t2) { + dst.timestamps[k] = t1; + dst.values[k] = curr.values[i]; + ++i; + } else { + dst.timestamps[k] = t2; + dst.values[k] = values.getFloat(firstIndex + j); + ++j; + } + ++k; + } + if (i < leftCount) { + System.arraycopy(curr.timestamps, i, dst.timestamps, k, leftCount - i); + System.arraycopy(curr.values, i, dst.values, k, leftCount - i); + } + while (j < rightCount) { + dst.timestamps[k] = timestamps.getLong(firstIndex + j); + dst.values[k] = values.getFloat(firstIndex + j); + ++k; + ++j; + } + } + + void combineState(int groupId, FloatRateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker(FloatRateState.bytesUsed(len)); + curr = new FloatRateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + FloatRateState mergeState(FloatRateState s1, FloatRateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker(FloatRateState.bytesUsed(newLen)); + var dst = new FloatRateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + + @Override + public long ramBytesUsed() { + return states.ramBytesUsed() + stateBytes; + } + + @Override + public void close() { + Releasables.close(states, () -> adjustBreaker(-stateBytes)); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + final BlockFactory blockFactory = driverContext.blockFactory(); + final int positionCount = selected.getPositionCount(); + try ( + LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2); + FloatBlock.Builder values = blockFactory.newFloatBlockBuilder(positionCount * 2); + DoubleVector.FixedBuilder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount) + ) { + for (int i = 0; i < positionCount; i++) { + final var groupId = selected.getInt(i); + final var state = groupId < states.size() ? 
states.get(groupId) : null; + if (state != null) { + timestamps.beginPositionEntry(); + for (long t : state.timestamps) { + timestamps.appendLong(t); + } + timestamps.endPositionEntry(); + + values.beginPositionEntry(); + for (float v : state.values) { + values.appendFloat(v); + } + values.endPositionEntry(); + + resets.appendDouble(i, state.reset); + } else { + timestamps.appendNull(); + values.appendNull(); + resets.appendDouble(i, 0); + } + } + blocks[offset] = timestamps.build(); + blocks[offset + 1] = values.build(); + blocks[offset + 2] = resets.build().asBlock(); + } + } + + Block evaluateFinal(IntVector selected, BlockFactory blockFactory) { + int positionCount = selected.getPositionCount(); + try (DoubleBlock.Builder rates = blockFactory.newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + final var groupId = selected.getInt(p); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state == null) { + rates.appendNull(); + continue; + } + int len = state.entries(); + long dt = state.timestamps[0] - state.timestamps[len - 1]; + if (dt == 0) { + // TODO: maybe issue warning when we don't have enough sample? + rates.appendNull(); + } else { + double reset = state.reset; + for (int i = 1; i < len; i++) { + if (state.values[i - 1] < state.values[i]) { + reset += state.values[i]; + } + } + double dv = state.values[0] - state.values[len - 1] + reset; + rates.appendDouble(dv * unitInMillis / dt); + } + } + return rates.build(); + } + } + + void enableGroupIdTracking(SeenGroupIds seenGroupIds) { + // noop - we handle the null states inside `toIntermediate` and `evaluateFinal` + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListFloatAggregator.java new file mode 100644 index 0000000000000..c5fc51d5ba13f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListFloatAggregator.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.sort.FloatBucketedSort; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.search.sort.SortOrder; + +/** + * Aggregates the top N field values for float. 
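The final rate computed in `evaluateFinal` above is `(v_newest - v_oldest + reset) * unitInMillis / (t_newest - t_oldest)`, where `reset` re-adds the counter value at each detected reset. A self-contained arithmetic sketch with made-up sample data:

```java
// Worked example of the rate math in evaluateFinal above.
// Timestamps are stored newest-first, like the state's arrays.
public class RateMathSketch {
    public static void main(String[] args) {
        long[] timestamps = { 60_000L, 30_000L, 0L }; // millis, descending
        float[] values = { 2f, 10f, 4f };             // counter resets between 10 and 2
        double reset = 0;
        for (int i = 1; i < values.length; i++) {
            if (values[i - 1] < values[i]) {          // newer sample below older one => reset
                reset += values[i];
            }
        }
        long dt = timestamps[0] - timestamps[timestamps.length - 1]; // 60_000
        double dv = values[0] - values[values.length - 1] + reset;   // 2 - 4 + 10 = 8
        System.out.println(dv * 1000 / dt);           // unitInMillis = 1000 => ~0.133/sec
    }
}
```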
+ */ +@Aggregator({ @IntermediateState(name = "topList", type = "FLOAT_BLOCK") }) +@GroupingAggregator +class TopListFloatAggregator { + public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { + return new SingleState(bigArrays, limit, ascending); + } + + public static void combine(SingleState state, float v) { + state.add(v); + } + + public static void combineIntermediate(SingleState state, FloatBlock values) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getFloat(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(BigArrays bigArrays, int limit, boolean ascending) { + return new GroupingState(bigArrays, limit, ascending); + } + + public static void combine(GroupingState state, int groupId, float v) { + state.add(groupId, v); + } + + public static void combineIntermediate(GroupingState state, int groupId, FloatBlock values, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getFloat(i)); + } + } + + public static void combineStates(GroupingState current, int groupId, GroupingState state, int statePosition) { + current.merge(groupId, state, statePosition); + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory(), selected); + } + + public static class GroupingState implements Releasable { + private final FloatBucketedSort sort; + + private GroupingState(BigArrays bigArrays, int limit, boolean ascending) { + this.sort = new FloatBucketedSort(bigArrays, ascending ? 
SortOrder.ASC : SortOrder.DESC, limit); + } + + public void add(int groupId, float value) { + sort.collect(value, groupId); + } + + public void merge(int groupId, GroupingState other, int otherGroupId) { + sort.merge(groupId, other.sort, otherGroupId); + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory(), selected); + } + + Block toBlock(BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + void enableGroupIdTracking(SeenGroupIds seen) { + // we figure out seen values from nulls on the values block + } + + @Override + public void close() { + Releasables.closeExpectNoException(sort); + } + } + + public static class SingleState implements Releasable { + private final GroupingState internalState; + + private SingleState(BigArrays bigArrays, int limit, boolean ascending) { + this.internalState = new GroupingState(bigArrays, limit, ascending); + } + + public void add(float value) { + internalState.add(0, value); + } + + public void merge(GroupingState other) { + internalState.merge(0, other, 0); + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory()); + } + + Block toBlock(BlockFactory blockFactory) { + try (var intValues = blockFactory.newConstantIntVector(0, 1)) { + return internalState.toBlock(blockFactory, intValues); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(internalState); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/ValuesFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/ValuesFloatAggregator.java new file mode 100644 index 0000000000000..f9e5e1b7b283a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/ValuesFloatAggregator.java @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; + +/** + * Aggregates field values for float. + * This class is generated. Edit @{code X-ValuesAggregator.java.st} instead + * of this file. 
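The `TopListFloatAggregator` above is a thin wrapper: `SingleState` is just `GroupingState` pinned to bucket 0, and all ordering work happens in `FloatBucketedSort`. A usage sketch under that assumption (hypothetical direct use of the sort; `NON_RECYCLING_INSTANCE` again stands in for a real `BigArrays`):

```java
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.compute.data.sort.FloatBucketedSort;
import org.elasticsearch.search.sort.SortOrder;

// Collecting a top-3 descending list the way TopListFloatAggregator does.
void topThreeSketch() {
    try (FloatBucketedSort sort = new FloatBucketedSort(BigArrays.NON_RECYCLING_INSTANCE, SortOrder.DESC, 3)) {
        for (float v : new float[] { 1f, 5f, 3f, 9f, 2f }) {
            sort.collect(v, 0);  // bucket 0 plays the role of SingleState
        }
        // sort.toBlock(blockFactory, selected) would emit [9.0, 5.0, 3.0] for bucket 0.
    }
}
```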
+ */ +@Aggregator({ @IntermediateState(name = "values", type = "FLOAT_BLOCK") }) +@GroupingAggregator +class ValuesFloatAggregator { + public static SingleState initSingle(BigArrays bigArrays) { + return new SingleState(bigArrays); + } + + public static void combine(SingleState state, float v) { + state.values.add(Float.floatToIntBits(v)); + } + + public static void combineIntermediate(SingleState state, FloatBlock values) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getFloat(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(BigArrays bigArrays) { + return new GroupingState(bigArrays); + } + + public static void combine(GroupingState state, int groupId, float v) { + /* + * Encode the groupId and value into a single long - + * the top 32 bits for the group, the bottom 32 for the value. + */ + state.values.add((((long) groupId) << Float.SIZE) | (Float.floatToIntBits(v) & 0xFFFFFFFFL)); + } + + public static void combineIntermediate(GroupingState state, int groupId, FloatBlock values, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getFloat(i)); + } + } + + public static void combineStates(GroupingState current, int currentGroupId, GroupingState state, int statePosition) { + for (int id = 0; id < state.values.size(); id++) { + long both = state.values.get(id); + int group = (int) (both >>> Float.SIZE); + if (group == statePosition) { + float value = Float.intBitsToFloat((int) both); + combine(current, currentGroupId, value); + } + } + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory(), selected); + } + + public static class SingleState implements Releasable { + private final LongHash values; + + private SingleState(BigArrays bigArrays) { + values = new LongHash(1, bigArrays); + } + + void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory()); + } + + Block toBlock(BlockFactory blockFactory) { + if (values.size() == 0) { + return blockFactory.newConstantNullBlock(1); + } + if (values.size() == 1) { + return blockFactory.newConstantFloatBlockWith(Float.intBitsToFloat((int) values.get(0)), 1); + } + try (FloatBlock.Builder builder = blockFactory.newFloatBlockBuilder((int) values.size())) { + builder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendFloat(Float.intBitsToFloat((int) values.get(id))); + } + builder.endPositionEntry(); + return builder.build(); + } + } + + @Override + public void close() { + values.close(); + } + } + + /** + * State for a grouped {@code VALUES} aggregation. This implementation + * emphasizes collect-time performance over the performance of rendering + * results. That's good, but it's a pretty intensive emphasis, requiring + * an {@code O(n^2)} operation for collection to support a {@code O(1)} + * collector operation. But at least it's fairly simple. 
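The grouped `combine` above packs a `(groupId, value)` pair into one long: the group in the top 32 bits, the raw float bits in the bottom 32, with the mask guarding against sign extension of negative floats. A self-contained round-trip check:

```java
// Round trip of the (groupId, float) packing used by combine(...) above.
public class FloatPackingSketch {
    public static void main(String[] args) {
        int groupId = 7;
        float v = -2.5f;
        long both = (((long) groupId) << Float.SIZE) | (Float.floatToIntBits(v) & 0xFFFFFFFFL);
        int group = (int) (both >>> Float.SIZE);        // 7
        float value = Float.intBitsToFloat((int) both); // -2.5f
        System.out.println(group + " -> " + value);
    }
}
```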
+ */ + public static class GroupingState implements Releasable { + private final LongHash values; + + private GroupingState(BigArrays bigArrays) { + values = new LongHash(1, bigArrays); + } + + void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory(), selected); + } + + Block toBlock(BlockFactory blockFactory, IntVector selected) { + if (values.size() == 0) { + return blockFactory.newConstantNullBlock(selected.getPositionCount()); + } + try (FloatBlock.Builder builder = blockFactory.newFloatBlockBuilder(selected.getPositionCount())) { + for (int s = 0; s < selected.getPositionCount(); s++) { + int selectedGroup = selected.getInt(s); + /* + * Count can effectively be in three states - 0, 1, many. We use those + * states to buffer the first value, so we can avoid calling + * beginPositionEntry on single valued fields. + */ + int count = 0; + float first = 0; + for (int id = 0; id < values.size(); id++) { + long both = values.get(id); + int group = (int) (both >>> Float.SIZE); + if (group == selectedGroup) { + float value = Float.intBitsToFloat((int) both); + switch (count) { + case 0 -> first = value; + case 1 -> { + builder.beginPositionEntry(); + builder.appendFloat(first); + builder.appendFloat(value); + } + default -> builder.appendFloat(value); + } + count++; + } + } + switch (count) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendFloat(first); + default -> builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + void enableGroupIdTracking(SeenGroupIds seen) { + // we figure out seen values from nulls on the values block + } + + @Override + public void close() { + values.close(); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/FloatBucketedSort.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/FloatBucketedSort.java new file mode 100644 index 0000000000000..b490fe193c33f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/sort/FloatBucketedSort.java @@ -0,0 +1,346 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.FloatArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortOrder; + +import java.util.Arrays; +import java.util.stream.IntStream; + +/** + * Aggregates the top N float values per bucket. + * See {@link BucketedSort} for more information. + * This class is generated. Edit @{code X-BucketedSort.java.st} instead of this file. + */ +public class FloatBucketedSort implements Releasable { + + private final BigArrays bigArrays; + private final SortOrder order; + private final int bucketSize; + /** + * {@code true} if the bucket is in heap mode, {@code false} if + * it is still gathering. 
+ */ + private final BitArray heapMode; + /** + * An array containing all the values on all buckets. The structure is as follows: + *
* <p>
+ * For each bucket, there are bucketSize elements, based on the bucket id (0, 1, 2...).
+ * Then, for each bucket, it can be in 2 states:
+ * </p>
+ * <ul>
+ *   <li>
+ *     Gather mode: All buckets start in gather mode, and remain here while they have less than bucketSize elements.
+ *     In gather mode, the elements are stored in the array from the highest index to the lowest index.
+ *     The lowest index contains the offset to the next slot to be filled.
+ *     <p>
+ *     This allows us to insert elements in O(1) time.
+ *     </p>
+ *     <p>
+ *     When the bucketSize-th element is collected, the bucket transitions to heap mode, by heapifying its contents.
+ *     </p>
+ *   </li>
+ *   <li>
+ *     Heap mode: The bucket slots are organized as a min heap structure.
+ *     <p>
+ *     The root of the heap is the minimum value in the bucket,
+ *     which allows us to quickly discard new values that are not in the top N.
+ *     </p>
+ *   </li>
+ * </ul>
+ */ + private FloatArray values; + + public FloatBucketedSort(BigArrays bigArrays, SortOrder order, int bucketSize) { + this.bigArrays = bigArrays; + this.order = order; + this.bucketSize = bucketSize; + heapMode = new BitArray(0, bigArrays); + + boolean success = false; + try { + values = bigArrays.newFloatArray(0, false); + success = true; + } finally { + if (success == false) { + close(); + } + } + } + + /** + * Collects a {@code value} into a {@code bucket}. + *
* <p>
+ * It may or may not be inserted in the heap, depending on if it is better than the current root.
+ * </p>
+ */ + public void collect(float value, int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (inHeapMode(bucket)) { + if (betterThan(value, values.get(rootIndex))) { + values.set(rootIndex, value); + downHeap(rootIndex, 0); + } + return; + } + // Gathering mode + long requiredSize = rootIndex + bucketSize; + if (values.size() < requiredSize) { + grow(requiredSize); + } + int next = getNextGatherOffset(rootIndex); + assert 0 <= next && next < bucketSize + : "Expected next to be in the range of valid buckets [0 <= " + next + " < " + bucketSize + "]"; + long index = next + rootIndex; + values.set(index, value); + if (next == 0) { + heapMode.set(bucket); + heapify(rootIndex); + } else { + setNextGatherOffset(rootIndex, next - 1); + } + } + + /** + * The order of the sort. + */ + public SortOrder getOrder() { + return order; + } + + /** + * The number of values to store per bucket. + */ + public int getBucketSize() { + return bucketSize; + } + + /** + * Get the first and last indexes (inclusive, exclusive) of the values for a bucket. + * Returns [0, 0] if the bucket has never been collected. + */ + private Tuple getBucketValuesIndexes(int bucket) { + long rootIndex = (long) bucket * bucketSize; + if (rootIndex >= values.size()) { + // We've never seen this bucket. + return Tuple.tuple(0L, 0L); + } + long start = inHeapMode(bucket) ? rootIndex : (rootIndex + getNextGatherOffset(rootIndex) + 1); + long end = rootIndex + bucketSize; + return Tuple.tuple(start, end); + } + + /** + * Merge the values from {@code other}'s {@code otherGroupId} into {@code groupId}. + */ + public void merge(int groupId, FloatBucketedSort other, int otherGroupId) { + var otherBounds = other.getBucketValuesIndexes(otherGroupId); + + // TODO: This can be improved for heapified buckets by making use of the heap structures + for (long i = otherBounds.v1(); i < otherBounds.v2(); i++) { + collect(other.values.get(i), groupId); + } + } + + /** + * Creates a block with the values from the {@code selected} groups. + */ + public Block toBlock(BlockFactory blockFactory, IntVector selected) { + // Check if the selected groups are all empty, to avoid allocating extra memory + if (IntStream.range(0, selected.getPositionCount()).map(selected::getInt).noneMatch(bucket -> { + var bounds = this.getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + return size > 0; + })) { + return blockFactory.newConstantNullBlock(selected.getPositionCount()); + } + + // Used to sort the values in the bucket. 
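`collect` above leans on the gather-offset trick from the field javadoc: while a bucket is gathering, slot 0 stores the next free slot index and values fill from the highest slot down, so inserts are O(1) until the bucket is full. A self-contained simulation for one bucket, with a plain `float[]` standing in for the BigArrays-backed `FloatArray`:

```java
// Trace of gather mode for one bucket, bucketSize = 3.
public class GatherModeSketch {
    public static void main(String[] args) {
        int bucketSize = 3;
        float[] bucket = new float[bucketSize];
        bucket[0] = bucketSize - 1;           // slot 0 holds the next gather offset
        for (float v : new float[] { 10f, 20f, 30f }) {
            int next = (int) bucket[0];
            bucket[next] = v;                 // fill from the highest slot down
            if (next == 0) {
                // offset reached 0: the bucket is full and would now be heapified
                System.out.println("heapify " + java.util.Arrays.toString(bucket));
            } else {
                bucket[0] = next - 1;         // record the next free slot
            }
        }
    }
}
```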
+ var bucketValues = new float[bucketSize]; + + try (var builder = blockFactory.newFloatBlockBuilder(selected.getPositionCount())) { + for (int s = 0; s < selected.getPositionCount(); s++) { + int bucket = selected.getInt(s); + + var bounds = getBucketValuesIndexes(bucket); + var size = bounds.v2() - bounds.v1(); + + if (size == 0) { + builder.appendNull(); + continue; + } + + if (size == 1) { + builder.appendFloat(values.get(bounds.v1())); + continue; + } + + for (int i = 0; i < size; i++) { + bucketValues[i] = values.get(bounds.v1() + i); + } + + // TODO: Make use of heap structures to faster iterate in order instead of copying and sorting + Arrays.sort(bucketValues, 0, (int) size); + + builder.beginPositionEntry(); + if (order == SortOrder.ASC) { + for (int i = 0; i < size; i++) { + builder.appendFloat(bucketValues[i]); + } + } else { + for (int i = (int) size - 1; i >= 0; i--) { + builder.appendFloat(bucketValues[i]); + } + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + /** + * Is this bucket a min heap {@code true} or in gathering mode {@code false}? + */ + private boolean inHeapMode(int bucket) { + return heapMode.get(bucket); + } + + /** + * Get the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private int getNextGatherOffset(long rootIndex) { + return (int) values.get(rootIndex); + } + + /** + * Set the next index that should be "gathered" for a bucket rooted + * at {@code rootIndex}. + */ + private void setNextGatherOffset(long rootIndex, int offset) { + values.set(rootIndex, offset); + } + + /** + * {@code true} if the entry at index {@code lhs} is "better" than + * the entry at {@code rhs}. "Better" in this means "lower" for + * {@link SortOrder#ASC} and "higher" for {@link SortOrder#DESC}. + */ + private boolean betterThan(float lhs, float rhs) { + return getOrder().reverseMul() * Float.compare(lhs, rhs) < 0; + } + + /** + * Swap the data at two indices. + */ + private void swap(long lhs, long rhs) { + var tmp = values.get(lhs); + values.set(lhs, values.get(rhs)); + values.set(rhs, tmp); + } + + /** + * Allocate storage for more buckets and store the "next gather offset" + * for those new buckets. + */ + private void grow(long minSize) { + long oldMax = values.size(); + values = bigArrays.grow(values, minSize); + // Set the next gather offsets for all newly allocated buckets. + setNextGatherOffsets(oldMax - (oldMax % getBucketSize())); + } + + /** + * Maintain the "next gather offsets" for newly allocated buckets. + */ + private void setNextGatherOffsets(long startingAt) { + int nextOffset = getBucketSize() - 1; + for (long bucketRoot = startingAt; bucketRoot < values.size(); bucketRoot += getBucketSize()) { + setNextGatherOffset(bucketRoot, nextOffset); + } + } + + /** + * Heapify a bucket whose entries are in random order. + *

+ * This works by validating the heap property on each node, iterating + * "upwards", pushing any out of order parents "down". Check out the + * wikipedia + * entry on binary heaps for more about this. + *

+ *

+ * While this *looks* like it could easily be {@code O(n * log n)}, it is + * a fairly well studied algorithm attributed to Floyd. There's + * been a bunch of work that puts this at {@code O(n)}, close to 1.88n worst + * case. + *

+ * + * @param rootIndex the index the start of the bucket + */ + private void heapify(long rootIndex) { + int maxParent = bucketSize / 2 - 1; + for (int parent = maxParent; parent >= 0; parent--) { + downHeap(rootIndex, parent); + } + } + + /** + * Correct the heap invariant of a parent and its children. This + * runs in {@code O(log n)} time. + * @param rootIndex index of the start of the bucket + * @param parent Index within the bucket of the parent to check. + * For example, 0 is the "root". + */ + private void downHeap(long rootIndex, int parent) { + while (true) { + long parentIndex = rootIndex + parent; + int worst = parent; + long worstIndex = parentIndex; + int leftChild = parent * 2 + 1; + long leftIndex = rootIndex + leftChild; + if (leftChild < bucketSize) { + if (betterThan(values.get(worstIndex), values.get(leftIndex))) { + worst = leftChild; + worstIndex = leftIndex; + } + int rightChild = leftChild + 1; + long rightIndex = rootIndex + rightChild; + if (rightChild < bucketSize && betterThan(values.get(worstIndex), values.get(rightIndex))) { + worst = rightChild; + worstIndex = rightIndex; + } + } + if (worst == parent) { + break; + } + swap(worstIndex, parentIndex); + parent = worst; + } + } + + @Override + public final void close() { + Releasables.close(values, heapMode); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunction.java new file mode 100644 index 0000000000000..aad616eac95a1 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunction.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link CountDistinctFloatAggregator}. + * This class is generated. Do not edit it. 
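For reference, the gather-then-heapify strategy implemented above reduces to a few lines once the BigArrays bookkeeping is stripped away. A minimal, self-contained sketch of a single `SortOrder.ASC` bucket follows; the class and member names are illustrative only, not part of this PR:

```java
import java.util.Arrays;

// Keeps the N smallest floats seen, mirroring one ascending bucket: values are
// gathered back-to-front until the bucket fills, then the bucket becomes a
// max-heap whose root is the worst (largest) value still kept.
final class TopNFloats {
    private final float[] values;
    private int nextGatherOffset; // next slot to fill while gathering
    private boolean heapMode;

    TopNFloats(int n) {
        values = new float[n];
        nextGatherOffset = n - 1;
    }

    void collect(float v) {
        if (heapMode) {
            if (v < values[0]) {     // better than the worst kept value?
                values[0] = v;
                downHeap(0);
            }
            return;
        }
        values[nextGatherOffset] = v;
        if (nextGatherOffset == 0) { // bucket is full: switch modes
            heapMode = true;
            // Floyd's bottom-up heap construction, O(n)
            for (int parent = values.length / 2 - 1; parent >= 0; parent--) {
                downHeap(parent);
            }
        } else {
            nextGatherOffset--;
        }
    }

    private void downHeap(int parent) {
        while (true) {
            int worst = parent;
            int left = 2 * parent + 1;
            int right = left + 1;
            if (left < values.length && values[left] > values[worst]) {
                worst = left;
            }
            if (right < values.length && values[right] > values[worst]) {
                worst = right;
            }
            if (worst == parent) {
                return;
            }
            float tmp = values[parent];
            values[parent] = values[worst];
            values[worst] = tmp;
            parent = worst;
        }
    }

    float[] sortedValues() {         // ascending copy of whatever was kept
        int from = heapMode ? 0 : nextGatherOffset + 1;
        float[] out = Arrays.copyOfRange(values, from, values.length);
        Arrays.sort(out);
        return out;
    }
}
```

The production class stores the "next gather offset" inside slot 0 of each bucket rather than in a field, which is why collect() overwrites it with a real value exactly when the bucket flips into heap mode.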
+ */ +public final class CountDistinctFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("hll", ElementType.BYTES_REF) ); + + private final DriverContext driverContext; + + private final HllStates.SingleState state; + + private final List channels; + + private final int precision; + + public CountDistinctFloatAggregatorFunction(DriverContext driverContext, List channels, + HllStates.SingleState state, int precision) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + this.precision = precision; + } + + public static CountDistinctFloatAggregatorFunction create(DriverContext driverContext, + List channels, int precision) { + return new CountDistinctFloatAggregatorFunction(driverContext, channels, CountDistinctFloatAggregator.initSingle(driverContext.bigArrays(), precision), precision); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(FloatVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + CountDistinctFloatAggregator.combine(state, vector.getFloat(i)); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + CountDistinctFloatAggregator.combine(state, block.getFloat(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { + return; + } + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); + assert hll.getPositionCount() == 1; + BytesRef scratch = new BytesRef(); + CountDistinctFloatAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = CountDistinctFloatAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..4c2aad00a7a72 --- /dev/null +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunctionSupplier.java @@ -0,0 +1,42 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link CountDistinctFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class CountDistinctFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + private final int precision; + + public CountDistinctFloatAggregatorFunctionSupplier(List channels, int precision) { + this.channels = channels; + this.precision = precision; + } + + @Override + public CountDistinctFloatAggregatorFunction aggregator(DriverContext driverContext) { + return CountDistinctFloatAggregatorFunction.create(driverContext, channels, precision); + } + + @Override + public CountDistinctFloatGroupingAggregatorFunction groupingAggregator( + DriverContext driverContext) { + return CountDistinctFloatGroupingAggregatorFunction.create(channels, driverContext, precision); + } + + @Override + public String describe() { + return "count_distinct of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..60c1755b88c6a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java @@ -0,0 +1,202 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link CountDistinctFloatAggregator}. + * This class is generated. Do not edit it. 
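The supplier just shown is a thin factory: the planner constructs it once, and each driver then requests an aggregator bound to its own DriverContext so that state is allocated against that driver's BigArrays. A hypothetical usage sketch; the wrapper class, the precision value, and the assumption that aggregator functions are auto-closeable (as their close() methods suggest) are all illustrative:

```java
import java.util.List;
import org.elasticsearch.compute.aggregation.AggregatorFunction;
import org.elasticsearch.compute.aggregation.CountDistinctFloatAggregatorFunctionSupplier;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.Page;
import org.elasticsearch.compute.operator.DriverContext;

final class CountDistinctSketch {
    // Runs the non-grouping count_distinct aggregator over one page of floats
    // read from channel 0 and returns the final block.
    static Block countDistinct(DriverContext driverContext, Page page) {
        var supplier = new CountDistinctFloatAggregatorFunctionSupplier(List.of(0), 14); // 14: illustrative precision
        try (AggregatorFunction agg = supplier.aggregator(driverContext)) {
            agg.addRawInput(page);
            Block[] out = new Block[1];
            agg.evaluateFinal(out, 0, driverContext);
            return out[0];
        }
    }
}
```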
+ */ +public final class CountDistinctFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("hll", ElementType.BYTES_REF) ); + + private final HllStates.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final int precision; + + public CountDistinctFloatGroupingAggregatorFunction(List channels, + HllStates.GroupingState state, DriverContext driverContext, int precision) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.precision = precision; + } + + public static CountDistinctFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext, int precision) { + return new CountDistinctFloatGroupingAggregatorFunction(channels, CountDistinctFloatAggregator.initGrouping(driverContext.bigArrays(), precision), driverContext, precision); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + CountDistinctFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + CountDistinctFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int 
groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + CountDistinctFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + CountDistinctFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { + return; + } + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + CountDistinctFloatAggregator.combineIntermediate(state, groupId, hll.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + HllStates.GroupingState inState = ((CountDistinctFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + CountDistinctFloatAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = CountDistinctFloatAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunction.java new file mode 100644 index 0000000000000..0dcef4341727d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunction.java @@ -0,0 +1,138 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MaxFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.FLOAT), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final FloatState state; + + private final List channels; + + public MaxFloatAggregatorFunction(DriverContext driverContext, List channels, + FloatState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MaxFloatAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MaxFloatAggregatorFunction(driverContext, channels, new FloatState(MaxFloatAggregator.init())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(FloatVector vector) { + state.seen(true); + for (int i = 0; i < vector.getPositionCount(); i++) { + state.floatValue(MaxFloatAggregator.combine(state.floatValue(), vector.getFloat(i))); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + state.seen(true); + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + state.floatValue(MaxFloatAggregator.combine(state.floatValue(), block.getFloat(i))); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + FloatVector max = ((FloatBlock) maxUncast).asVector(); + assert max.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + if (seen.getBoolean(0)) { + state.floatValue(MaxFloatAggregator.combine(state.floatValue(), max.getFloat(0))); + state.seen(true); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + 
state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + if (state.seen() == false) { + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); + return; + } + blocks[offset] = driverContext.blockFactory().newConstantFloatBlockWith(state.floatValue(), 1); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..a3aa44f432430 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MaxFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public MaxFloatAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public MaxFloatAggregatorFunction aggregator(DriverContext driverContext) { + return MaxFloatAggregatorFunction.create(driverContext, channels); + } + + @Override + public MaxFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MaxFloatGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "max of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..85708792732a7 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java @@ -0,0 +1,208 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
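One detail worth calling out in MaxFloatAggregatorFunction above: the intermediate state is a (max, seen) pair rather than a bare float, because a partial aggregation that saw no rows must not leak its init() sentinel into the final result; evaluateFinal emits a constant-null block when seen is false. The combine rule, reduced to a sketch (the record and the sentinel value are illustrative, not part of this PR):

```java
// Merging two partial max states: an "unseen" partial is an identity element.
record MaxSeen(float max, boolean seen) {
    static final MaxSeen EMPTY = new MaxSeen(Float.NEGATIVE_INFINITY, false);

    MaxSeen combine(MaxSeen other) {
        if (other.seen() == false) {
            return this;
        }
        if (seen() == false) {
            return other;
        }
        return new MaxSeen(Math.max(max(), other.max()), true);
    }

    Float finish() {
        return seen() ? max() : null; // mirrors the ConstantNullBlock path above
    }
}
```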
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MaxFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.FLOAT), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final FloatArrayState state; + + private final List channels; + + private final DriverContext driverContext; + + public MaxFloatGroupingAggregatorFunction(List channels, FloatArrayState state, + DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MaxFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new MaxFloatGroupingAggregatorFunction(channels, new FloatArrayState(driverContext.bigArrays(), MaxFloatAggregator.init()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(v))); + } + } + } + + private void 
addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(v))); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + FloatVector max = ((FloatBlock) maxUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert max.getPositionCount() == seen.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (seen.getBoolean(groupPosition + positionOffset)) { + state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), max.getFloat(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + FloatArrayState inState = ((MaxFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + if (inState.hasValue(position)) { + state.set(groupId, MaxFloatAggregator.combine(state.getOrDefault(groupId), inState.get(position))); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, 
IntVector selected, + DriverContext driverContext) { + blocks[offset] = state.toValuesBlock(selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunction.java new file mode 100644 index 0000000000000..38a16859140f8 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunction.java @@ -0,0 +1,124 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MedianAbsoluteDeviationFloatAggregator}. + * This class is generated. Do not edit it. 
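For the median_absolute_deviation classes that follow: MAD is the median of each value's absolute deviation from the median, a robust measure of spread. The generated code approximates it via the serialized quantile state carried in the "quart" BytesRef column; an exact reference implementation over a small, non-empty array pins down the quantity being approximated (illustrative code, not part of this PR):

```java
import java.util.Arrays;

final class MadReference {
    // mad(xs) = median(|x - median(xs)|); xs must be non-empty.
    static double mad(double[] xs) {
        double median = median(xs.clone());
        double[] deviations = new double[xs.length];
        for (int i = 0; i < xs.length; i++) {
            deviations[i] = Math.abs(xs[i] - median);
        }
        return median(deviations);
    }

    // Sorts its argument in place and returns the midpoint value.
    private static double median(double[] xs) {
        Arrays.sort(xs);
        int mid = xs.length / 2;
        return xs.length % 2 == 1 ? xs[mid] : (xs[mid - 1] + xs[mid]) / 2.0;
    }
}
```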
+ */ +public final class MedianAbsoluteDeviationFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("quart", ElementType.BYTES_REF) ); + + private final DriverContext driverContext; + + private final QuantileStates.SingleState state; + + private final List channels; + + public MedianAbsoluteDeviationFloatAggregatorFunction(DriverContext driverContext, + List channels, QuantileStates.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MedianAbsoluteDeviationFloatAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MedianAbsoluteDeviationFloatAggregatorFunction(driverContext, channels, MedianAbsoluteDeviationFloatAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(FloatVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + MedianAbsoluteDeviationFloatAggregator.combine(state, vector.getFloat(i)); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + MedianAbsoluteDeviationFloatAggregator.combine(state, block.getFloat(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { + return; + } + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); + assert quart.getPositionCount() == 1; + BytesRef scratch = new BytesRef(); + MedianAbsoluteDeviationFloatAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = MedianAbsoluteDeviationFloatAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..1fad0faafad4e --- /dev/null +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunctionSupplier.java @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MedianAbsoluteDeviationFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class MedianAbsoluteDeviationFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public MedianAbsoluteDeviationFloatAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public MedianAbsoluteDeviationFloatAggregatorFunction aggregator(DriverContext driverContext) { + return MedianAbsoluteDeviationFloatAggregatorFunction.create(driverContext, channels); + } + + @Override + public MedianAbsoluteDeviationFloatGroupingAggregatorFunction groupingAggregator( + DriverContext driverContext) { + return MedianAbsoluteDeviationFloatGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "median_absolute_deviation of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..84646476fcee0 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java @@ -0,0 +1,199 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MedianAbsoluteDeviationFloatAggregator}. + * This class is generated. Do not edit it. 
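The grouping class below follows the same dispatch pattern already visible for count_distinct and max: prepareProcessPage inspects the values block once and returns an AddInput wired to one of four addRawInput overloads, the cross product of {IntVector, IntBlock} group ids and {FloatVector, FloatBlock} values, so the dense fast path pays for no null or multi-value checks. Condensed, with blockPath/vectorPath standing in for the anonymous AddInput classes (those helper names are illustrative):

```java
// Sketch of the per-page dispatch; state and channels are the fields above.
GroupingAggregatorFunction.AddInput prepare(SeenGroupIds seenGroupIds, Page page) {
    FloatBlock valuesBlock = page.getBlock(channels.get(0));
    FloatVector valuesVector = valuesBlock.asVector(); // non-null only when dense and single-valued
    if (valuesVector == null) {
        if (valuesBlock.mayHaveNulls()) {
            // Track group ids so groups whose rows were all null still read as "saw nothing".
            state.enableGroupIdTracking(seenGroupIds);
        }
        return blockPath(valuesBlock);  // checked path: isNull + getFirstValueIndex/getValueCount walk
    }
    return vectorPath(valuesVector);    // fast path: exactly one value per position
}
```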
+ */ +public final class MedianAbsoluteDeviationFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("quart", ElementType.BYTES_REF) ); + + private final QuantileStates.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public MedianAbsoluteDeviationFloatGroupingAggregatorFunction(List channels, + QuantileStates.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MedianAbsoluteDeviationFloatGroupingAggregatorFunction create( + List channels, DriverContext driverContext) { + return new MedianAbsoluteDeviationFloatGroupingAggregatorFunction(channels, MedianAbsoluteDeviationFloatAggregator.initGrouping(driverContext.bigArrays()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MedianAbsoluteDeviationFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + MedianAbsoluteDeviationFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = 
Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + MedianAbsoluteDeviationFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + MedianAbsoluteDeviationFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { + return; + } + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + MedianAbsoluteDeviationFloatAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + QuantileStates.GroupingState inState = ((MedianAbsoluteDeviationFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + MedianAbsoluteDeviationFloatAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = MedianAbsoluteDeviationFloatAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunction.java new file mode 100644 index 0000000000000..ecabcbdcf57bb --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunction.java @@ -0,0 +1,138 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MinFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.FLOAT), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final FloatState state; + + private final List channels; + + public MinFloatAggregatorFunction(DriverContext driverContext, List channels, + FloatState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MinFloatAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MinFloatAggregatorFunction(driverContext, channels, new FloatState(MinFloatAggregator.init())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(FloatVector vector) { + state.seen(true); + for (int i = 0; i < vector.getPositionCount(); i++) { + state.floatValue(MinFloatAggregator.combine(state.floatValue(), vector.getFloat(i))); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + state.seen(true); + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + state.floatValue(MinFloatAggregator.combine(state.floatValue(), block.getFloat(i))); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + FloatVector min = ((FloatBlock) minUncast).asVector(); + assert min.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + if (seen.getBoolean(0)) { + state.floatValue(MinFloatAggregator.combine(state.floatValue(), min.getFloat(0))); + state.seen(true); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + 
state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + if (state.seen() == false) { + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); + return; + } + blocks[offset] = driverContext.blockFactory().newConstantFloatBlockWith(state.floatValue(), 1); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..a8ccc70f9996a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MinFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public MinFloatAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public MinFloatAggregatorFunction aggregator(DriverContext driverContext) { + return MinFloatAggregatorFunction.create(driverContext, channels); + } + + @Override + public MinFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MinFloatGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "min of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..2f00bbf1335ed --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java @@ -0,0 +1,208 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
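The min classes mirror the max classes earlier in this diff exactly; the template only plugs in a different init() sentinel and combine() direction from the hand-written MinFloatAggregator, which is not included in this diff. Those two method names are real (the generated code calls them above), but their bodies here are assumed:

```java
// A plausible shape of the hand-written delegate (sketch only):
// init() must be the identity element for min, so any real input beats it.
final class MinFloatAggregatorSketch {
    static float init() {
        return Float.POSITIVE_INFINITY; // assumed sentinel
    }

    static float combine(float current, float v) {
        return Math.min(current, v);
    }
}
```

Either way, the (min, seen) intermediate pair keeps the sentinel unobservable: as in the max case, evaluateFinal returns a null block for groups that never saw a value.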
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MinFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.FLOAT), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final FloatArrayState state; + + private final List channels; + + private final DriverContext driverContext; + + public MinFloatGroupingAggregatorFunction(List channels, FloatArrayState state, + DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MinFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new MinFloatGroupingAggregatorFunction(channels, new FloatArrayState(driverContext.bigArrays(), MinFloatAggregator.init()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(v))); + } + } + } + + private void 
addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(v))); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), values.getFloat(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + FloatVector min = ((FloatBlock) minUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert min.getPositionCount() == seen.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (seen.getBoolean(groupPosition + positionOffset)) { + state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), min.getFloat(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + FloatArrayState inState = ((MinFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + if (inState.hasValue(position)) { + state.set(groupId, MinFloatAggregator.combine(state.getOrDefault(groupId), inState.get(position))); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, 
IntVector selected, + DriverContext driverContext) { + blocks[offset] = state.toValuesBlock(selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunction.java new file mode 100644 index 0000000000000..8f0ffd81e64b6 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunction.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link PercentileFloatAggregator}. + * This class is generated. Do not edit it. 
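A note on the shape shared by MinFloatGroupingAggregatorFunction above and every grouping function in this patch: prepareProcessPage takes a fast path when the value column is a dense FloatVector and falls back to null-aware FloatBlock handling otherwise, and the per-group state is exactly the columns declared in INTERMEDIATE_STATE_DESC, here a FLOAT minimum plus a BOOLEAN seen flag. A self-contained plain-Java sketch of that shape, simplified to one value per position and assuming Float.POSITIVE_INFINITY as the init value (the real init/combine live in the hand-written MinFloatAggregator):

import java.util.Arrays;

final class GroupedFloatMinSketch {
    private final float[] min;
    private final boolean[] seen;

    GroupedFloatMinSketch(int maxGroups) {
        min = new float[maxGroups];
        seen = new boolean[maxGroups];
        Arrays.fill(min, Float.POSITIVE_INFINITY); // assumed init; the real value lives in MinFloatAggregator.init()
    }

    // Dense path: a FloatVector-like input, one non-null value per position.
    void addVector(int[] groupIds, float[] values) {
        for (int p = 0; p < values.length; p++) {
            combine(groupIds[p], values[p]);
        }
    }

    // Null-aware path: a FloatBlock-like input where positions may be null.
    void addBlock(int[] groupIds, float[] values, boolean[] isNull) {
        for (int p = 0; p < values.length; p++) {
            if (isNull[p]) {
                continue; // mirrors the values.isNull(...) checks in the generated code
            }
            combine(groupIds[p], values[p]);
        }
    }

    private void combine(int groupId, float v) {
        min[groupId] = Math.min(min[groupId], v);
        seen[groupId] = true; // the BOOLEAN "seen" column of the intermediate state
    }

    Float result(int groupId) {
        return seen[groupId] ? min[groupId] : null; // unseen groups evaluate to null
    }
}

The seen flag is what lets final evaluation distinguish a group that never saw a value (null result) from one whose true minimum happens to equal the init value.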
+ */ +public final class PercentileFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("quart", ElementType.BYTES_REF) ); + + private final DriverContext driverContext; + + private final QuantileStates.SingleState state; + + private final List channels; + + private final double percentile; + + public PercentileFloatAggregatorFunction(DriverContext driverContext, List channels, + QuantileStates.SingleState state, double percentile) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + this.percentile = percentile; + } + + public static PercentileFloatAggregatorFunction create(DriverContext driverContext, + List channels, double percentile) { + return new PercentileFloatAggregatorFunction(driverContext, channels, PercentileFloatAggregator.initSingle(percentile), percentile); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(FloatVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + PercentileFloatAggregator.combine(state, vector.getFloat(i)); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + PercentileFloatAggregator.combine(state, block.getFloat(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { + return; + } + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); + assert quart.getPositionCount() == 1; + BytesRef scratch = new BytesRef(); + PercentileFloatAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = PercentileFloatAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..1d1678f15448c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunctionSupplier.java @@ -0,0 +1,41 @@ 
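PercentileFloatAggregatorFunction above is the ungrouped case: every float is folded into one QuantileStates.SingleState, and the whole state travels between nodes as the single BYTES_REF "quart" column, hence the assert that the intermediate block has exactly one position, which combineIntermediate then merges back in. A deliberately naive stand-in that keeps the add/merge/evaluate shape but computes an exact nearest-rank percentile; the production state is a serialized quantile sketch, not a list:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class NaivePercentileState {
    private final List<Float> values = new ArrayList<>();
    private final double percentile; // e.g. 95.0

    NaivePercentileState(double percentile) {
        this.percentile = percentile;
    }

    void add(float v) {                       // the addRawInput path
        values.add(v);
    }

    void merge(NaivePercentileState other) {  // the addIntermediateInput path
        values.addAll(other.values);
    }

    Float evaluate() {                        // the evaluateFinal path
        if (values.isEmpty()) {
            return null;
        }
        Collections.sort(values);
        int rank = (int) Math.ceil(percentile / 100.0 * values.size()) - 1; // nearest-rank
        return values.get(Math.max(rank, 0));
    }
}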
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.util.List;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link AggregatorFunctionSupplier} implementation for {@link PercentileFloatAggregator}.
+ * This class is generated. Do not edit it.
+ */
+public final class PercentileFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier {
+  private final List<Integer> channels;
+
+  private final double percentile;
+
+  public PercentileFloatAggregatorFunctionSupplier(List<Integer> channels, double percentile) {
+    this.channels = channels;
+    this.percentile = percentile;
+  }
+
+  @Override
+  public PercentileFloatAggregatorFunction aggregator(DriverContext driverContext) {
+    return PercentileFloatAggregatorFunction.create(driverContext, channels, percentile);
+  }
+
+  @Override
+  public PercentileFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) {
+    return PercentileFloatGroupingAggregatorFunction.create(channels, driverContext, percentile);
+  }
+
+  @Override
+  public String describe() {
+    return "percentile of floats";
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java
new file mode 100644
index 0000000000000..564e0e90018c2
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java
@@ -0,0 +1,202 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.lang.StringBuilder;
+import java.util.List;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BytesRefBlock;
+import org.elasticsearch.compute.data.BytesRefVector;
+import org.elasticsearch.compute.data.ElementType;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.data.FloatVector;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link GroupingAggregatorFunction} implementation for {@link PercentileFloatAggregator}.
+ * This class is generated. Do not edit it.
+ */ +public final class PercentileFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("quart", ElementType.BYTES_REF) ); + + private final QuantileStates.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final double percentile; + + public PercentileFloatGroupingAggregatorFunction(List channels, + QuantileStates.GroupingState state, DriverContext driverContext, double percentile) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.percentile = percentile; + } + + public static PercentileFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext, double percentile) { + return new PercentileFloatGroupingAggregatorFunction(channels, PercentileFloatAggregator.initGrouping(driverContext.bigArrays(), percentile), driverContext, percentile); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + PercentileFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + PercentileFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + 
int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + PercentileFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + PercentileFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { + return; + } + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + PercentileFloatAggregator.combineIntermediate(state, groupId, quart.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + QuantileStates.GroupingState inState = ((PercentileFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + PercentileFloatAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = PercentileFloatAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..4b1546314a9cb --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatAggregatorFunctionSupplier.java @@ -0,0 +1,41 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
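Also worth spelling out once, since the same four addRawInput overloads recur in every grouping function here: group ids arrive either as a dense IntVector or as an IntBlock whose positions may be null or multivalued, meaning one input row can belong to several groups, and the value column independently shows up as a vector or a block. A compact model of the group-side dispatch (values kept dense for brevity; an int[][] with null entries stands in for the IntBlock case):

final class GroupDispatchSketch {
    interface Combine {
        void accept(int groupId, float value);
    }

    // IntVector groups: exactly one group id per position.
    static void addDense(int[] groups, float[] values, Combine fn) {
        for (int p = 0; p < groups.length; p++) {
            fn.accept(groups[p], values[p]);
        }
    }

    // IntBlock groups: a position may map to zero (null) or several group ids.
    static void addMultivalued(int[][] groups, float[] values, Combine fn) {
        for (int p = 0; p < groups.length; p++) {
            if (groups[p] == null) {
                continue; // groups.isNull(groupPosition) in the generated code
            }
            for (int groupId : groups[p]) { // the getFirstValueIndex..getValueCount loop
                fn.accept(groupId, values[p]);
            }
        }
    }
}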
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link RateFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class RateFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + private final long unitInMillis; + + public RateFloatAggregatorFunctionSupplier(List channels, long unitInMillis) { + this.channels = channels; + this.unitInMillis = unitInMillis; + } + + @Override + public AggregatorFunction aggregator(DriverContext driverContext) { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public RateFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return RateFloatGroupingAggregatorFunction.create(channels, driverContext, unitInMillis); + } + + @Override + public String describe() { + return "rate of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..40f53741bf3da --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java @@ -0,0 +1,227 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link RateFloatAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class RateFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.FLOAT), + new IntermediateStateDesc("resets", ElementType.DOUBLE) ); + + private final RateFloatAggregator.FloatRateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final long unitInMillis; + + public RateFloatGroupingAggregatorFunction(List channels, + RateFloatAggregator.FloatRateGroupingState state, DriverContext driverContext, + long unitInMillis) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.unitInMillis = unitInMillis; + } + + public static RateFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext, long unitInMillis) { + return new RateFloatGroupingAggregatorFunction(channels, RateFloatAggregator.initGrouping(driverContext, unitInMillis), driverContext, unitInMillis); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + LongBlock timestampsBlock = page.getBlock(channels.get(1)); + LongVector timestampsVector = timestampsBlock.asVector(); + if (timestampsVector == null) { + throw new IllegalStateException("expected @timestamp vector; but got a block"); + } + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + RateFloatAggregator.combine(state, groupId, timestamps.getLong(v), values.getFloat(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + var valuePosition = 
groupPosition + positionOffset; + RateFloatAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getFloat(valuePosition)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + RateFloatAggregator.combine(state, groupId, timestamps.getLong(v), values.getFloat(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values, + LongVector timestamps) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + var valuePosition = groupPosition + positionOffset; + RateFloatAggregator.combine(state, groupId, timestamps.getLong(valuePosition), values.getFloat(valuePosition)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + Block resetsUncast = page.getBlock(channels.get(2)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + RateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, resets.getDouble(groupPosition + positionOffset), groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + RateFloatAggregator.FloatRateGroupingState inState = ((RateFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + RateFloatAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public 
void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = RateFloatAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunction.java new file mode 100644 index 0000000000000..3dedc327294d5 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunction.java @@ -0,0 +1,144 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link SumFloatAggregator}. + * This class is generated. Do not edit it. 
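The rate classes above are grouping-only by design (the supplier's plain aggregator() throws), since a rate is defined per time series, and their intermediate state is three parallel columns: per-group timestamps, float values, and an accumulated resets correction. What follows is a hedged sketch of the arithmetic such a state plausibly supports, assuming Prometheus-style counter semantics where a value drop means the counter restarted; the actual algorithm belongs to the hand-written RateFloatAggregator:

import java.util.Arrays;
import java.util.Comparator;

final class CounterRateSketch {
    // samples[i] = {timestampMillis, value}; unitInMillis scales the result,
    // e.g. 1000 for a per-second rate. Returns null for fewer than two samples.
    static Double rate(double[][] samples, long unitInMillis) {
        if (samples.length < 2) {
            return null;
        }
        double[][] sorted = samples.clone();
        Arrays.sort(sorted, Comparator.comparingDouble(s -> s[0]));
        double resets = 0;
        for (int i = 1; i < sorted.length; i++) {
            if (sorted[i][1] < sorted[i - 1][1]) {
                resets += sorted[i - 1][1]; // assume a restart; add back what the counter had reached
            }
        }
        double dv = sorted[sorted.length - 1][1] - sorted[0][1] + resets;
        double dt = sorted[sorted.length - 1][0] - sorted[0][0];
        return dt == 0 ? null : dv / dt * unitInMillis;
    }
}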
+ */ +public final class SumFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("value", ElementType.DOUBLE), + new IntermediateStateDesc("delta", ElementType.DOUBLE), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final SumDoubleAggregator.SumState state; + + private final List channels; + + public SumFloatAggregatorFunction(DriverContext driverContext, List channels, + SumDoubleAggregator.SumState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SumFloatAggregatorFunction create(DriverContext driverContext, + List channels) { + return new SumFloatAggregatorFunction(driverContext, channels, SumFloatAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(FloatVector vector) { + state.seen(true); + for (int i = 0; i < vector.getPositionCount(); i++) { + SumFloatAggregator.combine(state, vector.getFloat(i)); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + state.seen(true); + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + SumFloatAggregator.combine(state, block.getFloat(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block valueUncast = page.getBlock(channels.get(0)); + if (valueUncast.areAllValuesNull()) { + return; + } + DoubleVector value = ((DoubleBlock) valueUncast).asVector(); + assert value.getPositionCount() == 1; + Block deltaUncast = page.getBlock(channels.get(1)); + if (deltaUncast.areAllValuesNull()) { + return; + } + DoubleVector delta = ((DoubleBlock) deltaUncast).asVector(); + assert delta.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(2)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + SumFloatAggregator.combineIntermediate(state, value.getDouble(0), delta.getDouble(0), seen.getBoolean(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + if (state.seen() == false) { + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); + return; + } + blocks[offset] = SumFloatAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..515122ec08ac0 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link SumFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class SumFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public SumFloatAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public SumFloatAggregatorFunction aggregator(DriverContext driverContext) { + return SumFloatAggregatorFunction.create(driverContext, channels); + } + + @Override + public SumFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return SumFloatGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "sum of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..c69ce16f0bccb --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java @@ -0,0 +1,212 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SumFloatAggregator}. + * This class is generated. Do not edit it. 
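SumFloat above delegates to SumDoubleAggregator.SumState, and its value/delta/seen intermediate triple reads like compensated (Kahan) summation carried in double space, with seen letting final evaluation emit a null block for empty input. A minimal sketch of such a state, including the reduce-phase fold that addIntermediateInput performs on each partial (value, delta, seen) row, under the assumption that delta is the Kahan correction term:

final class KahanSumSketch {
    private double value;  // running sum
    private double delta;  // low-order bits lost by earlier additions
    private boolean seen;  // false => evaluate to null, mirroring evaluateFinal

    void add(double v) {
        seen = true;
        double corrected = v - delta;
        double newValue = value + corrected;
        delta = (newValue - value) - corrected; // what this addition rounded away
        value = newValue;
    }

    // Reduce phase: fold one partial (value, delta, seen) into this state.
    void combineIntermediate(double otherValue, double otherDelta, boolean otherSeen) {
        if (otherSeen) {
            add(otherValue);
            add(-otherDelta); // the partial's compensated total is value - delta
        }
    }

    Double evaluate() {
        return seen ? value : null;
    }
}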
+ */ +public final class SumFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("value", ElementType.DOUBLE), + new IntermediateStateDesc("delta", ElementType.DOUBLE), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final SumDoubleAggregator.GroupingSumState state; + + private final List channels; + + private final DriverContext driverContext; + + public SumFloatGroupingAggregatorFunction(List channels, + SumDoubleAggregator.GroupingSumState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SumFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new SumFloatGroupingAggregatorFunction(channels, SumFloatAggregator.initGrouping(driverContext.bigArrays()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + SumFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + SumFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + 
if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + SumFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + SumFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valueUncast = page.getBlock(channels.get(0)); + if (valueUncast.areAllValuesNull()) { + return; + } + DoubleVector value = ((DoubleBlock) valueUncast).asVector(); + Block deltaUncast = page.getBlock(channels.get(1)); + if (deltaUncast.areAllValuesNull()) { + return; + } + DoubleVector delta = ((DoubleBlock) deltaUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(2)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert value.getPositionCount() == delta.getPositionCount() && value.getPositionCount() == seen.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + SumFloatAggregator.combineIntermediate(state, groupId, value.getDouble(groupPosition + positionOffset), delta.getDouble(groupPosition + positionOffset), seen.getBoolean(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + SumDoubleAggregator.GroupingSumState inState = ((SumFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + SumFloatAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = SumFloatAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunction.java 
new file mode 100644 index 0000000000000..6232d6ff21fc9 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunction.java @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link TopListFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class TopListFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("topList", ElementType.FLOAT) ); + + private final DriverContext driverContext; + + private final TopListFloatAggregator.SingleState state; + + private final List channels; + + private final int limit; + + private final boolean ascending; + + public TopListFloatAggregatorFunction(DriverContext driverContext, List channels, + TopListFloatAggregator.SingleState state, int limit, boolean ascending) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + this.limit = limit; + this.ascending = ascending; + } + + public static TopListFloatAggregatorFunction create(DriverContext driverContext, + List channels, int limit, boolean ascending) { + return new TopListFloatAggregatorFunction(driverContext, channels, TopListFloatAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(FloatVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + TopListFloatAggregator.combine(state, vector.getFloat(i)); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + TopListFloatAggregator.combine(state, block.getFloat(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block topListUncast = page.getBlock(channels.get(0)); + if (topListUncast.areAllValuesNull()) { + return; + } + FloatBlock topList = (FloatBlock) topListUncast; + assert topList.getPositionCount() == 1; + TopListFloatAggregator.combineIntermediate(state, topList); + } + + @Override + public 
void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = TopListFloatAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..ff1c3e8df4b46 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionSupplier.java @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link TopListFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class TopListFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + private final int limit; + + private final boolean ascending; + + public TopListFloatAggregatorFunctionSupplier(List channels, int limit, + boolean ascending) { + this.channels = channels; + this.limit = limit; + this.ascending = ascending; + } + + @Override + public TopListFloatAggregatorFunction aggregator(DriverContext driverContext) { + return TopListFloatAggregatorFunction.create(driverContext, channels, limit, ascending); + } + + @Override + public TopListFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return TopListFloatGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); + } + + @Override + public String describe() { + return "top_list of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..66f8fa7eeb35d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatGroupingAggregatorFunction.java @@ -0,0 +1,202 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
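The TopList classes above keep at most limit values per state, ordered according to the ascending flag, and their intermediate state is simply the current list, which is why combineIntermediate just re-combines the shipped values. A bounded top-N sketch using a heap whose root is always the next value to evict (stand-in logic for illustration, not TopListFloatAggregator's actual state):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

final class TopListSketch {
    private final Comparator<Float> cmp;
    private final PriorityQueue<Float> heap;
    private final int limit; // assumed >= 1

    TopListSketch(int limit, boolean ascending) {
        this.limit = limit;
        // ascending keeps the smallest values, so the largest retained value
        // sits at the root, ready to be evicted; and vice versa.
        this.cmp = ascending ? Comparator.<Float>reverseOrder() : Comparator.<Float>naturalOrder();
        this.heap = new PriorityQueue<>(cmp);
    }

    void combine(float v) {
        if (heap.size() < limit) {
            heap.offer(v);
        } else if (cmp.compare(heap.peek(), v) < 0) {
            heap.poll(); // evict the worst retained value
            heap.offer(v);
        }
    }

    List<Float> evaluate() {
        List<Float> out = new ArrayList<>(heap);
        out.sort(cmp.reversed()); // best value first
        return out;
    }
}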
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link TopListFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class TopListFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("topList", ElementType.FLOAT) ); + + private final TopListFloatAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + private final int limit; + + private final boolean ascending; + + public TopListFloatGroupingAggregatorFunction(List channels, + TopListFloatAggregator.GroupingState state, DriverContext driverContext, int limit, + boolean ascending) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + this.limit = limit; + this.ascending = ascending; + } + + public static TopListFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext, int limit, boolean ascending) { + return new TopListFloatGroupingAggregatorFunction(channels, TopListFloatAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + 
TopListFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + TopListFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + TopListFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + TopListFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block topListUncast = page.getBlock(channels.get(0)); + if (topListUncast.areAllValuesNull()) { + return; + } + FloatBlock topList = (FloatBlock) topListUncast; + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + TopListFloatAggregator.combineIntermediate(state, groupId, topList, groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + TopListFloatAggregator.GroupingState inState = ((TopListFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + TopListFloatAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = TopListFloatAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + 
return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunction.java new file mode 100644 index 0000000000000..c7385e87bfbf2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunction.java @@ -0,0 +1,120 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link ValuesFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class ValuesFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.FLOAT) ); + + private final DriverContext driverContext; + + private final ValuesFloatAggregator.SingleState state; + + private final List channels; + + public ValuesFloatAggregatorFunction(DriverContext driverContext, List channels, + ValuesFloatAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static ValuesFloatAggregatorFunction create(DriverContext driverContext, + List channels) { + return new ValuesFloatAggregatorFunction(driverContext, channels, ValuesFloatAggregator.initSingle(driverContext.bigArrays())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(FloatVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + ValuesFloatAggregator.combine(state, vector.getFloat(i)); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + ValuesFloatAggregator.combine(state, block.getFloat(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + assert 
values.getPositionCount() == 1; + ValuesFloatAggregator.combineIntermediate(state, values); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = ValuesFloatAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..b4b0c2f1a0444 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link ValuesFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class ValuesFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public ValuesFloatAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public ValuesFloatAggregatorFunction aggregator(DriverContext driverContext) { + return ValuesFloatAggregatorFunction.create(driverContext, channels); + } + + @Override + public ValuesFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return ValuesFloatGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "values of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..54cc06072cd24 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java @@ -0,0 +1,195 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
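The generated grouping implementation that follows specializes addRawInput four ways (IntVector or IntBlock group ids crossed with FloatVector or FloatBlock values) so the hot path skips null and multivalue bookkeeping when the inputs are dense. A minimal, self-contained sketch of the Block-side iteration pattern; SimpleFloatBlock is a hypothetical stand-in for the real org.elasticsearch.compute.data.FloatBlock API, kept here only so the example compiles on its own:

    import java.util.ArrayList;
    import java.util.List;

    public class BlockIterationSketch {
        // Hypothetical stand-in for FloatBlock, reduced to the accessors the
        // generated loops rely on.
        interface SimpleFloatBlock {
            int getPositionCount();
            boolean isNull(int position);
            int getFirstValueIndex(int position);
            int getValueCount(int position);
            float getFloat(int valueIndex);
        }

        // The canonical Block walk: skip null positions, then scan the
        // contiguous run of values each multivalued position owns.
        static List<Float> allValues(SimpleFloatBlock block) {
            List<Float> out = new ArrayList<>();
            for (int p = 0; p < block.getPositionCount(); p++) {
                if (block.isNull(p)) {
                    continue;
                }
                int start = block.getFirstValueIndex(p);
                int end = start + block.getValueCount(p);
                for (int i = start; i < end; i++) {
                    out.add(block.getFloat(i));
                }
            }
            return out;
        }
    }

A Vector is the dense special case: no nulls and exactly one value per position, which is why the Vector overloads collapse to a single indexed read per position.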
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link ValuesFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class ValuesFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.FLOAT) ); + + private final ValuesFloatAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public ValuesFloatGroupingAggregatorFunction(List channels, + ValuesFloatAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static ValuesFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new ValuesFloatGroupingAggregatorFunction(channels, ValuesFloatAggregator.initGrouping(driverContext.bigArrays()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + ValuesFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); 
groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + ValuesFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + ValuesFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + ValuesFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + ValuesFloatAggregator.combineIntermediate(state, groupId, values, groupPosition + positionOffset); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + ValuesFloatAggregator.GroupingState inState = ((ValuesFloatGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + ValuesFloatAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = ValuesFloatAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregator.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregator.java new file mode 100644 index 0000000000000..2159f0864e1cf --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregator.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; + +@Aggregator({ @IntermediateState(name = "hll", type = "BYTES_REF") }) +@GroupingAggregator +public class CountDistinctFloatAggregator { + + public static HllStates.SingleState initSingle(BigArrays bigArrays, int precision) { + return new HllStates.SingleState(bigArrays, precision); + } + + public static void combine(HllStates.SingleState current, float v) { + current.collect(v); + } + + public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { + current.merge(0, inValue, 0); + } + + public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { + long result = state.cardinality(); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); + } + + public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { + return new HllStates.GroupingState(bigArrays, precision); + } + + public static void combine(HllStates.GroupingState current, int groupId, float v) { + current.collect(groupId, v); + } + + public static void combineIntermediate(HllStates.GroupingState current, int groupId, BytesRef inValue) { + current.merge(groupId, inValue, 0); + } + + public static void combineStates( + HllStates.GroupingState current, + int currentGroupId, + HllStates.GroupingState state, + int statePosition + ) { + current.merge(currentGroupId, state.hll, statePosition); + } + + public static Block evaluateFinal(HllStates.GroupingState state, IntVector selected, DriverContext driverContext) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + long count = state.cardinality(group); + builder.appendLong(count); + } + return builder.build(); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxFloatAggregator.java new file mode 100644 index 0000000000000..eea436541069e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxFloatAggregator.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.compute.ann.Aggregator;
+import org.elasticsearch.compute.ann.GroupingAggregator;
+import org.elasticsearch.compute.ann.IntermediateState;
+
+@Aggregator({ @IntermediateState(name = "max", type = "FLOAT"), @IntermediateState(name = "seen", type = "BOOLEAN") })
+@GroupingAggregator
+class MaxFloatAggregator {
+
+    public static float init() {
+        // Float.MIN_VALUE is the smallest positive float, not the most negative one;
+        // start from negative infinity (mirroring MinFloatAggregator's POSITIVE_INFINITY)
+        // so a maximum over all-negative inputs is still correct.
+        return Float.NEGATIVE_INFINITY;
+    }
+
+    public static float combine(float current, float v) {
+        return Math.max(current, v);
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregator.java
new file mode 100644
index 0000000000000..b81cc945f0695
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregator.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.compute.ann.Aggregator;
+import org.elasticsearch.compute.ann.GroupingAggregator;
+import org.elasticsearch.compute.ann.IntermediateState;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.operator.DriverContext;
+
+@Aggregator({ @IntermediateState(name = "quart", type = "BYTES_REF") })
+@GroupingAggregator
+class MedianAbsoluteDeviationFloatAggregator {
+
+    public static QuantileStates.SingleState initSingle() {
+        return new QuantileStates.SingleState(QuantileStates.MEDIAN);
+    }
+
+    public static void combine(QuantileStates.SingleState current, float v) {
+        current.add(v);
+    }
+
+    public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) {
+        state.add(inValue);
+    }
+
+    public static Block evaluateFinal(QuantileStates.SingleState state, DriverContext driverContext) {
+        return state.evaluateMedianAbsoluteDeviation(driverContext);
+    }
+
+    public static QuantileStates.GroupingState initGrouping(BigArrays bigArrays) {
+        return new QuantileStates.GroupingState(bigArrays, QuantileStates.MEDIAN);
+    }
+
+    public static void combine(QuantileStates.GroupingState state, int groupId, float v) {
+        state.add(groupId, v);
+    }
+
+    public static void combineIntermediate(QuantileStates.GroupingState state, int groupId, BytesRef inValue) {
+        state.add(groupId, inValue);
+    }
+
+    public static void combineStates(
+        QuantileStates.GroupingState current,
+        int currentGroupId,
+        QuantileStates.GroupingState state,
+        int statePosition
+    ) {
+        current.add(currentGroupId, state.getOrNull(statePosition));
+    }
+
+    public static Block evaluateFinal(QuantileStates.GroupingState state, IntVector selected, DriverContext driverContext) {
+        return state.evaluateMedianAbsoluteDeviation(selected, driverContext);
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinFloatAggregator.java
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinFloatAggregator.java new file mode 100644 index 0000000000000..9ea52eab846c1 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinFloatAggregator.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; + +@Aggregator({ @IntermediateState(name = "min", type = "FLOAT"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MinFloatAggregator { + + public static float init() { + return Float.POSITIVE_INFINITY; + } + + public static float combine(float current, float v) { + return Math.min(current, v); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileFloatAggregator.java new file mode 100644 index 0000000000000..37b68b3c31335 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileFloatAggregator.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
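QuantileStates keeps a TDigest sketch, so the percentile the aggregator that follows returns is approximate. For orientation, a sort-based exact percentile with linear interpolation between closest ranks; this interpolation rule is an assumed reference definition for illustration, not the TDigest quantile rule:

    import java.util.Arrays;

    public class ExactPercentileSketch {
        // Exact p-th percentile (0..100) with linear interpolation between
        // the two closest ranks of the sorted data.
        static double percentile(double[] data, double p) {
            double[] sorted = data.clone();
            Arrays.sort(sorted);
            double rank = p / 100.0 * (sorted.length - 1);
            int lo = (int) Math.floor(rank);
            int hi = (int) Math.ceil(rank);
            return sorted[lo] + (rank - lo) * (sorted[hi] - sorted[lo]);
        }

        public static void main(String[] args) {
            double[] values = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
            System.out.println(percentile(values, 90)); // 9.1
        }
    }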
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; + +@Aggregator({ @IntermediateState(name = "quart", type = "BYTES_REF") }) +@GroupingAggregator +class PercentileFloatAggregator { + + public static QuantileStates.SingleState initSingle(double percentile) { + return new QuantileStates.SingleState(percentile); + } + + public static void combine(QuantileStates.SingleState current, float v) { + current.add(v); + } + + public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { + state.add(inValue); + } + + public static Block evaluateFinal(QuantileStates.SingleState state, DriverContext driverContext) { + return state.evaluatePercentile(driverContext); + } + + public static QuantileStates.GroupingState initGrouping(BigArrays bigArrays, double percentile) { + return new QuantileStates.GroupingState(bigArrays, percentile); + } + + public static void combine(QuantileStates.GroupingState state, int groupId, float v) { + state.add(groupId, v); + } + + public static void combineIntermediate(QuantileStates.GroupingState state, int groupId, BytesRef inValue) { + state.add(groupId, inValue); + } + + public static void combineStates( + QuantileStates.GroupingState current, + int currentGroupId, + QuantileStates.GroupingState state, + int statePosition + ) { + current.add(currentGroupId, state.getOrNull(statePosition)); + } + + public static Block evaluateFinal(QuantileStates.GroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluatePercentile(selected, driverContext); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumFloatAggregator.java new file mode 100644 index 0000000000000..ea6b55b949f15 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumFloatAggregator.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
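SumFloatAggregator below reuses SumDoubleAggregator's state, whose DOUBLE "value" plus "delta" intermediate columns point at compensated summation: inputs are widened to double and a running error term recaptures bits lost to rounding. A minimal sketch of that technique under those assumptions, not the actual SumDoubleAggregator internals:

    public class KahanSumSketch {
        public static void main(String[] args) {
            float[] values = { 1e8f, 0.5f, 0.5f, -1e8f };
            double sum = 0.0;
            double delta = 0.0; // running compensation for lost low-order bits
            for (float v : values) {
                double y = (double) v - delta;
                double t = sum + y;
                delta = (t - sum) - y; // what got rounded away in t
                sum = t;
            }
            System.out.println(sum); // 1.0
        }
    }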
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; + +@Aggregator( + { + @IntermediateState(name = "value", type = "DOUBLE"), + @IntermediateState(name = "delta", type = "DOUBLE"), + @IntermediateState(name = "seen", type = "BOOLEAN") } +) +@GroupingAggregator +class SumFloatAggregator extends SumDoubleAggregator { + + public static void combine(SumState current, float v) { + current.add(v); + } + + public static void combine(GroupingSumState current, int groupId, float v) { + current.add(v, groupId); + } + +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st index 246aebe2c08ec..18686928f14a8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st @@ -17,7 +17,7 @@ import org.elasticsearch.compute.data.$Type$Block; $if(int)$ import org.elasticsearch.compute.data.$Type$Vector; $endif$ -$if(double)$ +$if(double||float)$ import org.elasticsearch.compute.data.IntVector; $endif$ import org.elasticsearch.compute.operator.DriverContext; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st index 212a017cb300d..2581d3ebbf80b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st @@ -18,7 +18,9 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; -$if(int)$ +$if(float)$ +import org.elasticsearch.compute.data.FloatBlock; +$elseif(int)$ import org.elasticsearch.compute.data.IntBlock; $endif$ import org.elasticsearch.compute.data.IntVector; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ValuesAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ValuesAggregator.java.st index f9b15ccd34092..ea62dcf295825 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ValuesAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ValuesAggregator.java.st @@ -27,7 +27,7 @@ import org.elasticsearch.compute.ann.GroupingAggregator; import org.elasticsearch.compute.ann.IntermediateState; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -$if(int||double||BytesRef)$ +$if(int||double||float||BytesRef)$ import org.elasticsearch.compute.data.$Type$Block; $endif$ import org.elasticsearch.compute.data.IntVector; @@ -55,7 +55,9 @@ class Values$Type$Aggregator { } public static void combine(SingleState state, $type$ v) { -$if(double)$ +$if(float)$ + state.values.add(Float.floatToIntBits(v)); +$elseif(double)$ state.values.add(Double.doubleToLongBits(v)); $else$ state.values.add(v); @@ -98,6 +100,12 @@ $elseif(int)$ * the top 
32 bits for the group, the bottom 32 for the value. */ state.values.add((((long) groupId) << Integer.SIZE) | (v & 0xFFFFFFFFL)); +$elseif(float)$ + /* + * Encode the groupId and value into a single long - + * the top 32 bits for the group, the bottom 32 for the value. + */ + state.values.add((((long) groupId) << Float.SIZE) | (Float.floatToIntBits(v) & 0xFFFFFFFFL)); $endif$ } @@ -132,6 +140,11 @@ $elseif(int)$ int group = (int) (both >>> Integer.SIZE); if (group == statePosition) { int value = (int) both; +$elseif(float)$ + long both = state.values.get(id); + int group = (int) (both >>> Float.SIZE); + if (group == statePosition) { + float value = Float.intBitsToFloat((int) both); $endif$ combine(current, currentGroupId, $if(BytesRef)$state.bytes.get(value, scratch)$else$value$endif$); } @@ -172,6 +185,8 @@ $endif$ if (values.size() == 1) { $if(long)$ return blockFactory.newConstantLongBlockWith(values.get(0), 1); +$elseif(float)$ + return blockFactory.newConstantFloatBlockWith(Float.intBitsToFloat((int) values.get(0)), 1); $elseif(double)$ return blockFactory.newConstantDoubleBlockWith(Double.longBitsToDouble(values.get(0)), 1); $elseif(int)$ @@ -185,6 +200,8 @@ $endif$ for (int id = 0; id < values.size(); id++) { $if(long)$ builder.appendLong(values.get(id)); +$elseif(float)$ + builder.appendFloat(Float.intBitsToFloat((int) values.get(id))); $elseif(double)$ builder.appendDouble(Double.longBitsToDouble(values.get(id))); $elseif(int)$ @@ -219,7 +236,7 @@ $elseif(BytesRef)$ private final LongLongHash values; private final BytesRefHash bytes; -$elseif(int)$ +$elseif(int||float)$ private final LongHash values; $endif$ @@ -229,7 +246,7 @@ $if(long||double)$ $elseif(BytesRef)$ values = new LongLongHash(1, bigArrays); bytes = new BytesRefHash(1, bigArrays); -$elseif(int)$ +$elseif(int||float)$ values = new LongHash(1, bigArrays); $endif$ } @@ -262,6 +279,11 @@ $if(long||BytesRef)$ $elseif(double)$ if (values.getKey1(id) == selectedGroup) { double value = Double.longBitsToDouble(values.getKey2(id)); +$elseif(float)$ + long both = values.get(id); + int group = (int) (both >>> Float.SIZE); + if (group == selectedGroup) { + float value = Float.intBitsToFloat((int) both); $elseif(int)$ long both = values.get(id); int group = (int) (both >>> Integer.SIZE); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index 184f28e750aec..11ccfb55a77aa 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -140,7 +140,7 @@ LuceneScorer getCurrentOrLoadNextScorer() { logger.trace("Starting {}", partialLeaf); final LeafReaderContext leaf = partialLeaf.leafReaderContext(); if (currentScorer == null || currentScorer.leafReaderContext() != leaf) { - final Weight weight = currentSlice.weight().get(); + final Weight weight = currentSlice.weight(); processedQueries.add(weight.getQuery()); currentScorer = new LuceneScorer(currentSlice.shardContext(), weight, leaf); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSlice.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSlice.java index 716df6844e79f..d7b6a86e07905 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSlice.java +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSlice.java @@ -10,12 +10,11 @@ import org.apache.lucene.search.Weight; import java.util.List; -import java.util.function.Supplier; /** * Holds a list of multiple partial Lucene segments */ -public record LuceneSlice(ShardContext shardContext, List leaves, Supplier weight) { +public record LuceneSlice(ShardContext shardContext, List leaves, Weight weight) { int numLeaves() { return leaves.size(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java index f3bcdc7593dab..1c9c97a364fc7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java @@ -20,7 +20,6 @@ import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.function.Function; -import java.util.function.Supplier; /** * Shared Lucene slices between Lucene operators. @@ -64,16 +63,7 @@ public static LuceneSliceQueue create( case SEGMENT -> segmentSlices(leafContexts); case DOC -> docSlices(ctx.searcher().getIndexReader(), taskConcurrency); }; - final Weight[] cachedWeight = new Weight[1]; - final Supplier weight = () -> { - if (cachedWeight[0] == null) { - cachedWeight[0] = weightFunction.apply(ctx); - } - return cachedWeight[0]; - }; - if (groups.size() > 1) { - weight.get(); // eagerly build Weight once - } + final Weight weight = weightFunction.apply(ctx); for (List group : groups) { if (group.isEmpty() == false) { slices.add(new LuceneSlice(ctx, group, weight)); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java index 3dde3ba75be78..8d37feb37d8b6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.lucene; -import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedNumericDocValues; @@ -18,7 +17,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.common.Rounding; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.DocVector; @@ -29,8 +27,6 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.Releasables; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import java.io.IOException; import java.io.UncheckedIOException; @@ -52,25 +48,21 @@ public class TimeSeriesSortedSourceOperatorFactory extends LuceneOperator.Factory { private final int maxPageSize; - private final TimeValue timeSeriesPeriod; private TimeSeriesSortedSourceOperatorFactory( List contexts, Function queryFunction, int taskConcurrency, int 
maxPageSize, - TimeValue timeSeriesPeriod, int limit ) { super(contexts, queryFunction, DataPartitioning.SHARD, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES); this.maxPageSize = maxPageSize; - this.timeSeriesPeriod = timeSeriesPeriod; } @Override public SourceOperator get(DriverContext driverContext) { - var rounding = timeSeriesPeriod.equals(TimeValue.ZERO) == false ? Rounding.builder(timeSeriesPeriod).build() : null; - return new Impl(driverContext.blockFactory(), sliceQueue, maxPageSize, limit, rounding); + return new Impl(driverContext.blockFactory(), sliceQueue, maxPageSize, limit); } @Override @@ -82,18 +74,10 @@ public static TimeSeriesSortedSourceOperatorFactory create( int limit, int maxPageSize, int taskConcurrency, - TimeValue timeSeriesPeriod, List searchContexts, Function queryFunction ) { - return new TimeSeriesSortedSourceOperatorFactory( - searchContexts, - queryFunction, - taskConcurrency, - maxPageSize, - timeSeriesPeriod, - limit - ); + return new TimeSeriesSortedSourceOperatorFactory(searchContexts, queryFunction, taskConcurrency, maxPageSize, limit); } static final class Impl extends SourceOperator { @@ -101,20 +85,18 @@ static final class Impl extends SourceOperator { private final int maxPageSize; private final BlockFactory blockFactory; private final LuceneSliceQueue sliceQueue; - private final Rounding.Prepared rounding; private int currentPagePos = 0; private int remainingDocs; private boolean doneCollecting; private IntVector.Builder docsBuilder; private IntVector.Builder segmentsBuilder; private LongVector.Builder timestampsBuilder; - private LongVector.Builder intervalsBuilder; // TODO: add an ordinal block for tsid hashes // (This allows for efficiently grouping by tsid locally, no need to use bytes representation of tsid hash) private BytesRefVector.Builder tsHashesBuilder; private TimeSeriesIterator iterator; - Impl(BlockFactory blockFactory, LuceneSliceQueue sliceQueue, int maxPageSize, int limit, Rounding rounding) { + Impl(BlockFactory blockFactory, LuceneSliceQueue sliceQueue, int maxPageSize, int limit) { this.maxPageSize = maxPageSize; this.blockFactory = blockFactory; this.remainingDocs = limit; @@ -123,27 +105,6 @@ static final class Impl extends SourceOperator { this.timestampsBuilder = blockFactory.newLongVectorBuilder(Math.min(limit, maxPageSize)); this.tsHashesBuilder = blockFactory.newBytesRefVectorBuilder(Math.min(limit, maxPageSize)); this.sliceQueue = sliceQueue; - if (rounding != null) { - try { - long minTimestamp = Long.MAX_VALUE; - long maxTimestamp = Long.MIN_VALUE; - for (var slice : sliceQueue.getSlices()) { - for (var leaf : slice.leaves()) { - var pointValues = leaf.leafReaderContext().reader().getPointValues(DataStreamTimestampFieldMapper.DEFAULT_PATH); - long segmentMin = LongPoint.decodeDimension(pointValues.getMinPackedValue(), 0); - minTimestamp = Math.min(segmentMin, minTimestamp); - long segmentMax = LongPoint.decodeDimension(pointValues.getMaxPackedValue(), 0); - maxTimestamp = Math.max(segmentMax, maxTimestamp); - } - } - this.rounding = rounding.prepare(minTimestamp, maxTimestamp); - this.intervalsBuilder = blockFactory.newLongVectorBuilder(Math.min(limit, maxPageSize)); - } catch (IOException ioe) { - throw new UncheckedIOException(ioe); - } - } else { - this.rounding = null; - } } @Override @@ -172,7 +133,6 @@ public Page getOutput() { IntVector leaf = null; IntVector docs = null; LongVector timestamps = null; - LongVector intervals = null; BytesRefVector tsids = null; try { if (iterator == null) { @@ -201,20 
+161,13 @@ public Page getOutput() { timestamps = timestampsBuilder.build(); timestampsBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); - if (rounding != null) { - intervals = intervalsBuilder.build(); - intervalsBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); - } else { - intervals = blockFactory.newConstantLongVector(0, timestamps.getPositionCount()); - } tsids = tsHashesBuilder.build(); tsHashesBuilder = blockFactory.newBytesRefVectorBuilder(Math.min(remainingDocs, maxPageSize)); page = new Page( currentPagePos, new DocVector(shard.asVector(), leaf, docs, leaf.isConstant()).asBlock(), tsids.asBlock(), - timestamps.asBlock(), - intervals.asBlock() + timestamps.asBlock() ); currentPagePos = 0; @@ -225,7 +178,7 @@ public Page getOutput() { throw new UncheckedIOException(e); } finally { if (page == null) { - Releasables.closeExpectNoException(shard, leaf, docs, timestamps, tsids, intervals); + Releasables.closeExpectNoException(shard, leaf, docs, timestamps, tsids); } } return page; @@ -233,7 +186,7 @@ public Page getOutput() { @Override public void close() { - Releasables.closeExpectNoException(docsBuilder, segmentsBuilder, timestampsBuilder, intervalsBuilder, tsHashesBuilder); + Releasables.closeExpectNoException(docsBuilder, segmentsBuilder, timestampsBuilder, tsHashesBuilder); } class TimeSeriesIterator { @@ -246,7 +199,7 @@ class TimeSeriesIterator { TimeSeriesIterator(LuceneSlice slice) throws IOException { this.slice = slice; - Weight weight = slice.weight().get(); + Weight weight = slice.weight(); if (slice.numLeaves() == 1) { queue = null; leaf = new Leaf(weight, slice.getLeaf(0).leafReaderContext()); @@ -289,9 +242,6 @@ void consume() throws IOException { segmentsBuilder.appendInt(leaf.segmentOrd); docsBuilder.appendInt(leaf.iterator.docID()); timestampsBuilder.appendLong(leaf.timestamp); - if (rounding != null) { - intervalsBuilder.appendLong(rounding.round(leaf.timestamp)); - } tsHashesBuilder.appendBytesRef(currentTsid); final Leaf newTop; if (leaf.nextDoc()) { @@ -318,9 +268,6 @@ void consume() throws IOException { while (leaf.nextDoc()) { tsHashesBuilder.appendBytesRef(leaf.timeSeriesHash); timestampsBuilder.appendLong(leaf.timestamp); - if (rounding != null) { - intervalsBuilder.appendLong(rounding.round(leaf.timestamp)); - } // Don't append segment ord, because there is only one segment. 
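+ // Single-segment fast path: no cross-segment PriorityQueue merge is needed,
+ // because the sole leaf iterator already returns the current tsid's
+ // documents in docID order, so rows go straight into the builders.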
docsBuilder.appendInt(leaf.iterator.docID()); currentPagePos++; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/AggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/AggregatorFunctionTestCase.java index 08e8bca64bbe7..6e56b96bda06e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/AggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/AggregatorFunctionTestCase.java @@ -16,6 +16,7 @@ import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; @@ -223,6 +224,11 @@ protected static Stream allBooleans(Block input) { return allValueOffsets(b).mapToObj(i -> b.getBoolean(i)); } + protected static Stream allFloats(Block input) { + FloatBlock b = (FloatBlock) input; + return allValueOffsets(b).mapToObj(b::getFloat); + } + protected static DoubleStream allDoubles(Block input) { DoubleBlock b = (DoubleBlock) input; return allValueOffsets(b).mapToDouble(i -> b.getDouble(i)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunctionTests.java new file mode 100644 index 0000000000000..7f520de393d73 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctFloatAggregatorFunctionTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
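The count_distinct tests that follow treat the HyperLogLog++ result as an estimate and only pin it to within 10% of the exact distinct count. A self-contained version of that tolerance check, with the estimate hard-coded as a stand-in for the aggregator's cardinality() result:

    import java.util.stream.LongStream;

    public class ApproxCountToleranceSketch {
        public static void main(String[] args) {
            // Exact distinct count over input that repeats each value several times.
            long exact = LongStream.range(0, 10_000).map(i -> i % 3_000).distinct().count(); // 3000
            long estimate = 2_940; // hard-coded stand-in for an HLL cardinality() result
            if (Math.abs(estimate - exact) > exact * 0.1) {
                throw new AssertionError(estimate + " is not within 10% of " + exact);
            }
            System.out.println("estimate accepted");
        }
    }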
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.BasicBlockTests; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; +import java.util.stream.LongStream; + +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; + +public class CountDistinctFloatAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceFloatBlockSourceOperator(blockFactory, LongStream.range(0, size).mapToObj(l -> ESTestCase.randomFloat())); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctFloatAggregatorFunctionSupplier(inputChannels, 40000); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "count_distinct of floats"; + } + + @Override + protected void assertSimpleOutput(List input, Block result) { + long expected = input.stream().flatMap(AggregatorFunctionTestCase::allFloats).distinct().count(); + + long count = ((LongBlock) result).getLong(0); + // HLL is an approximation algorithm and precision depends on the number of values computed and the precision_threshold param + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html + // For a number of values close to 10k and precision_threshold=1000, precision should be less than 10% + assertThat((double) count, closeTo(expected, expected * .1)); + } + + @Override + protected void assertOutputFromEmpty(Block b) { + assertThat(b.getPositionCount(), equalTo(1)); + assertThat(BasicBlockTests.valuesAtPositions(b, 0, 1), equalTo(List.of(List.of(0L)))); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..03a11bb976b21 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunctionTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.LongFloatTupleBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Tuple; + +import java.util.List; +import java.util.stream.LongStream; + +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; + +public class CountDistinctFloatGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctFloatAggregatorFunctionSupplier(inputChannels, 40000); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "count_distinct of floats"; + } + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new LongFloatTupleBlockSourceOperator( + blockFactory, + LongStream.range(0, size).mapToObj(l -> Tuple.tuple(randomGroupId(size), randomFloatBetween(0, 100, true))) + ); + } + + @Override + protected void assertSimpleGroup(List input, Block result, int position, Long group) { + long distinct = input.stream().flatMap(p -> allFloats(p, group)).distinct().count(); + long count = ((LongBlock) result).getLong(position); + // HLL is an approximation algorithm and precision depends on the number of values computed and the precision_threshold param + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html + // For a number of values close to 10k and precision_threshold=1000, precision should be less than 10% + assertThat((double) count, closeTo(distinct, distinct * 0.1)); + } + + @Override + protected void assertOutputFromNullOnly(Block b, int position) { + assertThat(b.isNull(position), equalTo(false)); + assertThat(b.getValueCount(position), equalTo(1)); + assertThat(((LongBlock) b).getLong(b.getFirstValueIndex(position)), equalTo(0L)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java index d10e1bada5580..3436d6b537611 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java @@ -17,6 +17,7 @@ import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; @@ -202,6 +203,7 @@ protected void appendNull(ElementType elementType, Block.Builder builder, int bl append(builder, switch (elementType) { case BOOLEAN -> randomBoolean(); case BYTES_REF -> new BytesRef(randomAlphaOfLength(3)); + case FLOAT -> randomFloat(); case DOUBLE -> randomDouble(); case INT -> 1; case LONG -> 1L; @@ -276,7 +278,7 @@ public final void testMulitvaluedNullGroupsAndValues() { assertSimpleOutput(origInput, 
results); } - public void testMulitvaluedNullGroup() { + public final void testMulitvaluedNullGroup() { DriverContext driverContext = driverContext(); BlockFactory blockFactory = driverContext.blockFactory(); int end = between(1, 2); // TODO revert @@ -479,6 +481,11 @@ protected static Stream allBooleans(Page page, Long group) { return allValueOffsets(page, group).mapToObj(i -> b.getBoolean(i)); } + protected static Stream allFloats(Page page, Long group) { + FloatBlock b = page.getBlock(1); + return allValueOffsets(page, group).mapToObj(b::getFloat); + } + protected static DoubleStream allDoubles(Page page, Long group) { DoubleBlock b = page.getBlock(1); return allValueOffsets(page, group).mapToDouble(i -> b.getDouble(i)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunctionTests.java new file mode 100644 index 0000000000000..5e14a99fd0fa2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxFloatAggregatorFunctionTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; +import java.util.stream.LongStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxFloatAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceFloatBlockSourceOperator(blockFactory, LongStream.range(0, size).mapToObj(l -> ESTestCase.randomFloat())); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxFloatAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "max of floats"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Float max = input.stream().flatMap(AggregatorFunctionTestCase::allFloats).max(floatComparator()).get(); + assertThat(((FloatBlock) result).getFloat(0), equalTo(max)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..0abcb05a91af6 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunctionTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
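The grouping test below builds its expectation with plain streams: max per group, with an empty Optional marking a group that saw no values. A compact stand-alone version of that expectation; the GroupValue record here is a local stand-in, not org.elasticsearch.core.Tuple:

    import java.util.List;
    import java.util.Map;
    import java.util.Optional;
    import java.util.stream.Collectors;

    public class ExpectedMaxPerGroupSketch {
        // Local stand-in for the (group, value) tuples the real tests emit.
        record GroupValue(long group, float value) {}

        public static void main(String[] args) {
            List<GroupValue> input = List.of(
                new GroupValue(0, 1.5f),
                new GroupValue(0, 2.5f),
                new GroupValue(1, -3.0f)
            );
            // Expected per-group max; an empty Optional marks a group with no values.
            Map<Long, Optional<Float>> expected = input.stream()
                .collect(Collectors.groupingBy(
                    GroupValue::group,
                    Collectors.mapping(GroupValue::value, Collectors.maxBy(Float::compare))
                ));
            System.out.println(expected); // e.g. {0=Optional[2.5], 1=Optional[-3.0]}
        }
    }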
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.LongFloatTupleBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Tuple; + +import java.util.List; +import java.util.Optional; +import java.util.stream.LongStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxFloatGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { + return new LongFloatTupleBlockSourceOperator( + blockFactory, + LongStream.range(0, end).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), randomFloat())) + ); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxFloatAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "max of floats"; + } + + @Override + protected void assertSimpleGroup(List input, Block result, int position, Long group) { + Optional max = input.stream().flatMap(p -> allFloats(p, group)).max(floatComparator()); + if (max.isEmpty()) { + assertThat(result.isNull(position), equalTo(true)); + return; + } + assertThat(result.isNull(position), equalTo(false)); + assertThat(((FloatBlock) result).getFloat(position), equalTo(max.get())); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java index 8eba1842d688d..a6ca769036e54 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java @@ -76,9 +76,4 @@ static double median(DoubleStream s) { int c = data.length / 2; return data.length % 2 == 0 ? (data[c - 1] + data[c]) / 2 : data[c]; } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101569") - public void testMulitvaluedNullGroup() { - // only here for muting it - } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunctionTests.java new file mode 100644 index 0000000000000..786603e12f9c8 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatAggregatorFunctionTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.Matchers.closeTo; + +public class MedianAbsoluteDeviationFloatAggregatorFunctionTests extends AggregatorFunctionTestCase { + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { + List values = Arrays.asList(1.2f, 1.25f, 2.0f, 2.0f, 4.3f, 6.0f, 9.0f); + Randomness.shuffle(values); + return new SequenceFloatBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end))); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MedianAbsoluteDeviationFloatAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "median_absolute_deviation of floats"; + } + + @Override + protected void assertSimpleOutput(List input, Block result) { + assertThat(((DoubleBlock) result).getDouble(0), closeTo(0.8, 0.001d)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..14416b3aec1ee --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunctionTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
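The grouping MAD test below feeds five shuffled per-group samples. For reference, a stand-alone computation of the value the single (non-grouping) MAD test above asserts: the median of {1.2, 1.25, 2.0, 2.0, 4.3, 6.0, 9.0} is 2.0, and the median of the absolute deviations from it is 0.8:

    import java.util.Arrays;

    public class MadSketch {
        static double median(double[] data) {
            double[] sorted = data.clone();
            Arrays.sort(sorted);
            int c = sorted.length / 2;
            return sorted.length % 2 == 0 ? (sorted[c - 1] + sorted[c]) / 2 : sorted[c];
        }

        public static void main(String[] args) {
            double[] sample = { 1.2, 1.25, 2.0, 2.0, 4.3, 6.0, 9.0 };
            double m = median(sample); // 2.0
            double[] deviations = Arrays.stream(sample).map(d -> Math.abs(d - m)).toArray();
            System.out.println(median(deviations)); // 0.8, the value the test asserts
        }
    }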
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.common.Randomness;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.LongFloatTupleBlockSourceOperator;
+import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.core.Tuple;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.hamcrest.Matchers.closeTo;
+
+public class MedianAbsoluteDeviationFloatGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase {
+
+    @Override
+    protected SourceOperator simpleInput(BlockFactory blockFactory, int end) {
+        Float[][] samples = new Float[][] {
+            { 1.2f, 1.25f, 2.0f, 2.0f, 4.3f, 6.0f, 9.0f },
+            { 0.1f, 1.5f, 2.0f, 3.0f, 4.0f, 7.5f, 100.0f },
+            { 0.2f, 1.75f, 2.0f, 2.5f },
+            { 0.5f, 3.0f, 3.0f, 3.0f, 4.3f },
+            { 0.25f, 1.5f, 3.0f } };
+        List<Tuple<Long, Float>> values = new ArrayList<>();
+        for (int i = 0; i < samples.length; i++) {
+            List<Float> list = Arrays.stream(samples[i]).collect(Collectors.toList());
+            Randomness.shuffle(list);
+            for (float v : list) {
+                values.add(Tuple.tuple((long) i, v));
+            }
+        }
+        return new LongFloatTupleBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end)));
+    }
+
+    @Override
+    protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) {
+        return new MedianAbsoluteDeviationFloatAggregatorFunctionSupplier(inputChannels);
+    }
+
+    @Override
+    protected String expectedDescriptionOfAggregator() {
+        return "median_absolute_deviation of floats";
+    }
+
+    @Override
+    protected void assertSimpleGroup(List<Page> input, Block result, int position, Long group) {
+        double medianAbsoluteDeviation = medianAbsoluteDeviation(input.stream().flatMap(p -> allFloats(p, group)));
+        assertThat(((DoubleBlock) result).getDouble(position), closeTo(medianAbsoluteDeviation, medianAbsoluteDeviation * .000001));
+    }
+
+    static double medianAbsoluteDeviation(Stream<Float> s) {
+        Float[] data = s.toArray(Float[]::new);
+        float median = median(Arrays.stream(data));
+        return median(Arrays.stream(data).map(d -> Math.abs(median - d)));
+    }
+
+    static float median(Stream<Float> s) {
+        // The input data is small enough that tdigest will find the actual median.
+        Float[] data = s.sorted().toArray(Float[]::new);
+        if (data.length == 0) {
+            return 0;
+        }
+        int c = data.length / 2;
+        return data.length % 2 == 0 ? (data[c - 1] + data[c]) / 2 : data[c];
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionTests.java
new file mode 100644
index 0000000000000..59a09569c65a2
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionTests.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionTests.java
new file mode 100644
index 0000000000000..59a09569c65a2
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatAggregatorFunctionTests.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator;
+import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.List;
+import java.util.stream.LongStream;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MinFloatAggregatorFunctionTests extends AggregatorFunctionTestCase {
+    @Override
+    protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
+        return new SequenceFloatBlockSourceOperator(blockFactory, LongStream.range(0, size).mapToObj(l -> ESTestCase.randomFloat()));
+    }
+
+    @Override
+    protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) {
+        return new MinFloatAggregatorFunctionSupplier(inputChannels);
+    }
+
+    @Override
+    protected String expectedDescriptionOfAggregator() {
+        return "min of floats";
+    }
+
+    @Override
+    protected void assertSimpleOutput(List<Block> input, Block result) {
+        Float min = input.stream().flatMap(b -> allFloats(b)).min(floatComparator()).get();
+        assertThat(((FloatBlock) result).getFloat(0), equalTo(min));
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunctionTests.java
new file mode 100644
index 0000000000000..be41e058f60da
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunctionTests.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.LongFloatTupleBlockSourceOperator;
+import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.core.Tuple;
+
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.LongStream;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MinFloatGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase {
+    @Override
+    protected SourceOperator simpleInput(BlockFactory blockFactory, int end) {
+        return new LongFloatTupleBlockSourceOperator(
+            blockFactory,
+            LongStream.range(0, end).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), randomFloat()))
+        );
+    }
+
+    @Override
+    protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) {
+        return new MinFloatAggregatorFunctionSupplier(inputChannels);
+    }
+
+    @Override
+    protected String expectedDescriptionOfAggregator() {
+        return "min of floats";
+    }
+
+    @Override
+    protected void assertSimpleGroup(List<Page> input, Block result, int position, Long group) {
+        Optional<Float> min = input.stream().flatMap(p -> allFloats(p, group)).min(floatComparator());
+        if (min.isEmpty()) {
+            assertThat(result.isNull(position), equalTo(true));
+            return;
+        }
+        assertThat(result.isNull(position), equalTo(false));
+        assertThat(((FloatBlock) result).getFloat(position), equalTo(min.get()));
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunctionTests.java
new file mode 100644
index 0000000000000..6e4a1e09640dc
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileFloatAggregatorFunctionTests.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator;
+import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.search.aggregations.metrics.TDigestState;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
+
+import java.util.List;
+import java.util.stream.LongStream;
+
+import static org.hamcrest.Matchers.closeTo;
+
+public class PercentileFloatAggregatorFunctionTests extends AggregatorFunctionTestCase {
+
+    private double percentile;
+
+    @Before
+    public void initParameters() {
+        percentile = randomFrom(0, 1, 5, 10, 25, 50, 75, 90, 95, 99, 100);
+    }
+
+    @Override
+    protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) {
+        return new PercentileFloatAggregatorFunctionSupplier(inputChannels, percentile);
+    }
+
+    @Override
+    protected String expectedDescriptionOfAggregator() {
+        return "percentile of floats";
+    }
+
+    @Override
+    protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
+        return new SequenceFloatBlockSourceOperator(blockFactory, LongStream.range(0, size).mapToObj(l -> ESTestCase.randomFloat()));
+    }
+
+    @Override
+    protected void assertSimpleOutput(List<Block> input, Block result) {
+        TDigestState td = TDigestState.create(QuantileStates.DEFAULT_COMPRESSION);
+        input.stream().flatMap(AggregatorFunctionTestCase::allFloats).forEach(td::add);
+        double expected = td.quantile(percentile / 100);
+        double value = ((DoubleBlock) result).getDouble(0);
+        assertThat(value, closeTo(expected, expected * 0.1));
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunctionTests.java
new file mode 100644
index 0000000000000..84a97a7cd30ac
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunctionTests.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.LongFloatTupleBlockSourceOperator;
+import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.search.aggregations.metrics.TDigestState;
+import org.junit.Before;
+
+import java.util.List;
+import java.util.stream.LongStream;
+
+import static org.hamcrest.Matchers.closeTo;
+
+public class PercentileFloatGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase {
+
+    private double percentile;
+
+    @Before
+    public void initParameters() {
+        percentile = randomFrom(0, 1, 5, 10, 25, 50, 75, 90, 95, 99, 100);
+    }
+
+    @Override
+    protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) {
+        return new PercentileFloatAggregatorFunctionSupplier(inputChannels, percentile);
+    }
+
+    @Override
+    protected String expectedDescriptionOfAggregator() {
+        return "percentile of floats";
+    }
+
+    @Override
+    protected SourceOperator simpleInput(BlockFactory blockFactory, int end) {
+        return new LongFloatTupleBlockSourceOperator(
+            blockFactory,
+            LongStream.range(0, end).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), randomFloat()))
+        );
+    }
+
+    @Override
+    protected void assertSimpleGroup(List<Page> input, Block result, int position, Long group) {
+        TDigestState td = TDigestState.create(QuantileStates.DEFAULT_COMPRESSION);
+        input.stream().flatMap(p -> allFloats(p, group)).forEach(td::add);
+        if (td.size() > 0) {
+            double expected = td.quantile(percentile / 100);
+            double value = ((DoubleBlock) result).getDouble(position);
+            assertThat(value, closeTo(expected, expected * 0.1));
+        } else {
+            assertTrue(result.isNull(position));
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunctionTests.java
new file mode 100644
index 0000000000000..d365f02d289c8
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumFloatAggregatorFunctionTests.java
@@ -0,0 +1,156 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.Driver;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator;
+import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.compute.operator.TestResultPageSinkOperator;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.LongStream;
+import java.util.stream.Stream;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SumFloatAggregatorFunctionTests extends AggregatorFunctionTestCase {
+    @Override
+    protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
+        return new SequenceFloatBlockSourceOperator(blockFactory, LongStream.range(0, size).mapToObj(l -> ESTestCase.randomFloat()));
+    }
+
+    @Override
+    protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) {
+        return new SumFloatAggregatorFunctionSupplier(inputChannels);
+    }
+
+    @Override
+    protected String expectedDescriptionOfAggregator() {
+        return "sum of floats";
+    }
+
+    @Override
+    protected void assertSimpleOutput(List<Block> input, Block result) {
+        double sum = input.stream().flatMap(AggregatorFunctionTestCase::allFloats).mapToDouble(f -> (double) f).sum();
+        assertThat(((DoubleBlock) result).getDouble(0), closeTo(sum, .0001));
+    }
+
+    public void testOverflowSucceeds() {
+        DriverContext driverContext = driverContext();
+        List<Page> results = new ArrayList<>();
+        try (
+            Driver d = new Driver(
+                driverContext,
+                new SequenceFloatBlockSourceOperator(driverContext.blockFactory(), Stream.of(Float.MAX_VALUE - 1, 2f)),
+                List.of(simple().get(driverContext)),
+                new TestResultPageSinkOperator(results::add),
+                () -> {}
+            )
+        ) {
+            runDriver(d);
+        }
+        assertThat(results.get(0).<DoubleBlock>getBlock(0).getDouble(0), equalTo((double) Float.MAX_VALUE + 1));
+        assertDriverContext(driverContext);
+    }
+
+    public void testSummationAccuracy() {
+        DriverContext driverContext = driverContext();
+        List<Page> results = new ArrayList<>();
+        try (
+            Driver d = new Driver(
+                driverContext,
+                new SequenceFloatBlockSourceOperator(
+                    driverContext.blockFactory(),
+                    Stream.of(0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f)
+                ),
+                List.of(simple().get(driverContext)),
+                new TestResultPageSinkOperator(results::add),
+                () -> {}
+            )
+        ) {
+            runDriver(d);
+        }
+        assertEquals(15.3, results.get(0).<DoubleBlock>getBlock(0).getDouble(0), 0.001);
+        assertDriverContext(driverContext);
+
+        // Summing up an array which contains NaN and infinities and expect a result same as naive summation
+        results.clear();
+        int n = randomIntBetween(5, 10);
+        Float[] values = new Float[n];
+        float sum = 0;
+        for (int i = 0; i < n; i++) {
+            values[i] = frequently()
+                ? randomFrom(Float.NaN, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY)
+                : randomFloatBetween(Float.MIN_VALUE, Float.MAX_VALUE, true);
+            sum += values[i];
+        }
+        driverContext = driverContext();
+        try (
+            Driver d = new Driver(
+                driverContext,
+                new SequenceFloatBlockSourceOperator(driverContext.blockFactory(), Stream.of(values)),
+                List.of(simple().get(driverContext)),
+                new TestResultPageSinkOperator(results::add),
+                () -> {}
+            )
+        ) {
+            runDriver(d);
+        }
+        assertEquals(sum, results.get(0).<DoubleBlock>getBlock(0).getDouble(0), 1e-10);
+        assertDriverContext(driverContext);
+
+        // Summing up some big float values and expect a big double result
+        results.clear();
+        n = randomIntBetween(5, 10);
+        Float[] largeValues = new Float[n];
+        for (int i = 0; i < n; i++) {
+            largeValues[i] = Float.MAX_VALUE;
+        }
+        driverContext = driverContext();
+        try (
+            Driver d = new Driver(
+                driverContext,
+                new SequenceFloatBlockSourceOperator(driverContext.blockFactory(), Stream.of(largeValues)),
+                List.of(simple().get(driverContext)),
+                new TestResultPageSinkOperator(results::add),
+                () -> {}
+            )
+        ) {
+            runDriver(d);
+        }
+        assertEquals((double) Float.MAX_VALUE * n, results.get(0).<DoubleBlock>getBlock(0).getDouble(0), 0d);
+        assertDriverContext(driverContext);
+
+        results.clear();
+        for (int i = 0; i < n; i++) {
+            largeValues[i] = -Float.MAX_VALUE;
+        }
+        driverContext = driverContext();
+        try (
+            Driver d = new Driver(
+                driverContext,
+                new SequenceFloatBlockSourceOperator(driverContext.blockFactory(), Stream.of(largeValues)),
+                List.of(simple().get(driverContext)),
+                new TestResultPageSinkOperator(results::add),
+                () -> {}
+            )
+        ) {
+            runDriver(d);
+        }
+        assertEquals((double) -Float.MAX_VALUE * n, results.get(0).<DoubleBlock>getBlock(0).getDouble(0), 0d);
+        assertDriverContext(driverContext);
+    }
+}
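The overflow and Float.MAX_VALUE cases above hinge on the aggregator accumulating in double rather than float; a minimal standalone illustration of why that matters (plain Java, not the aggregator's code):

    float overflowed = Float.MAX_VALUE + Float.MAX_VALUE;                 // Infinity: float cannot represent the sum
    double widened = (double) Float.MAX_VALUE + (double) Float.MAX_VALUE; // ~6.8e38, still finite in double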
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunctionTests.java
new file mode 100644
index 0000000000000..54bd92cbfff21
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunctionTests.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.LongFloatTupleBlockSourceOperator;
+import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.search.aggregations.metrics.CompensatedSum;
+
+import java.util.List;
+import java.util.stream.LongStream;
+
+import static org.hamcrest.Matchers.closeTo;
+
+public class SumFloatGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase {
+    @Override
+    protected SourceOperator simpleInput(BlockFactory blockFactory, int end) {
+        return new LongFloatTupleBlockSourceOperator(
+            blockFactory,
+            LongStream.range(0, end).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), randomFloat()))
+        );
+    }
+
+    @Override
+    protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) {
+        return new SumFloatAggregatorFunctionSupplier(inputChannels);
+    }
+
+    @Override
+    protected String expectedDescriptionOfAggregator() {
+        return "sum of floats";
+    }
+
+    @Override
+    protected void assertSimpleGroup(List<Page> input, Block result, int position, Long group) {
+        CompensatedSum sum = new CompensatedSum();
+        input.stream().flatMap(p -> allFloats(p, group)).mapToDouble(f -> (double) f).forEach(sum::add);
+        // Won't precisely match in distributed case but will be close
+        assertThat(((DoubleBlock) result).getDouble(position), closeTo(sum.value(), 0.01));
+    }
+}
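The CompensatedSum used above implements Kahan summation; a minimal standalone sketch of the idea (illustrative Java, not the CompensatedSum API beyond the add/value calls shown in the test):

    double sum = 0.0;
    double c = 0.0;                          // running compensation for lost low-order bits
    for (double v : new double[] { 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 }) {
        double y = v - c;                    // apply the correction from the previous step
        double t = sum + y;                  // big + small: low-order bits of y may be lost here
        c = (t - sum) - y;                   // algebraically zero; recovers exactly what was lost
        sum = t;
    }
    // sum ends up closer to the true 1.0 than a naive left-to-right accumulation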
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionTests.java
new file mode 100644
index 0000000000000..98a016783955e
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionTests.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.BlockUtils;
+import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator;
+import org.elasticsearch.compute.operator.SourceOperator;
+
+import java.util.List;
+import java.util.stream.IntStream;
+
+import static org.hamcrest.Matchers.contains;
+
+public class TopListFloatAggregatorFunctionTests extends AggregatorFunctionTestCase {
+    private static final int LIMIT = 100;
+
+    @Override
+    protected SourceOperator simpleInput(BlockFactory blockFactory, int size) {
+        return new SequenceFloatBlockSourceOperator(blockFactory, IntStream.range(0, size).mapToObj(l -> randomFloat()));
+    }
+
+    @Override
+    protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) {
+        return new TopListFloatAggregatorFunctionSupplier(inputChannels, LIMIT, true);
+    }
+
+    @Override
+    protected String expectedDescriptionOfAggregator() {
+        return "top_list of floats";
+    }
+
+    @Override
+    public void assertSimpleOutput(List<Block> input, Block result) {
+        Object[] values = input.stream().flatMap(b -> allFloats(b)).sorted().limit(LIMIT).toArray(Object[]::new);
+        assertThat((List<?>) BlockUtils.toJavaObject(result, 0), contains(values));
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleAggregatorFunctionTests.java
index b5525b985be90..e5bb8e3138e25 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleAggregatorFunctionTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleAggregatorFunctionTests.java
@@ -13,7 +13,9 @@ import org.elasticsearch.compute.operator.SequenceDoubleBlockSourceOperator;
 import org.elasticsearch.compute.operator.SourceOperator;
 
+import java.util.Arrays;
 import java.util.List;
+import java.util.TreeSet;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
@@ -37,7 +39,14 @@ protected String expectedDescriptionOfAggregator() {
 
     @Override
     public void assertSimpleOutput(List<Block> input, Block result) {
-        Object[] values = input.stream().flatMapToDouble(b -> allDoubles(b)).boxed().collect(Collectors.toSet()).toArray(Object[]::new);
-        assertThat((List<?>) BlockUtils.toJavaObject(result, 0), containsInAnyOrder(values));
+        TreeSet<?> set = new TreeSet<>((List<?>) BlockUtils.toJavaObject(result, 0));
+        Object[] values = input.stream()
+            .flatMapToDouble(AggregatorFunctionTestCase::allDoubles)
+            .boxed()
+            .collect(Collectors.toSet())
+            .toArray(Object[]::new);
+        if (false == set.containsAll(Arrays.asList(values))) {
+            assertThat(set, containsInAnyOrder(values));
+        }
     }
 }
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunctionTests.java
index 4554a60b7a00c..a4b1a3c028e43 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunctionTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunctionTests.java
@@ -15,7 +15,9 @@ import org.elasticsearch.compute.operator.SourceOperator;
import org.elasticsearch.core.Tuple; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -53,7 +55,12 @@ public void assertSimpleGroup(List input, Block result, int position, Long switch (values.length) { case 0 -> assertThat(resultValue, nullValue()); case 1 -> assertThat(resultValue, equalTo(values[0])); - default -> assertThat((List) resultValue, containsInAnyOrder(values)); + default -> { + TreeSet set = new TreeSet<>((List) resultValue); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } + } } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionTests.java new file mode 100644 index 0000000000000..67068ce10c997 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Arrays; +import java.util.List; +import java.util.TreeSet; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.containsInAnyOrder; + +public class ValuesFloatAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceFloatBlockSourceOperator(blockFactory, IntStream.range(0, size).mapToObj(i -> randomFloat())); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new ValuesFloatAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "values of floats"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + TreeSet set = new TreeSet<>((List) BlockUtils.toJavaObject(result, 0)); + Object[] values = input.stream().flatMap(AggregatorFunctionTestCase::allFloats).collect(Collectors.toSet()).toArray(Object[]::new); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..e25d7567a1933 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunctionTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.LongFloatTupleBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Tuple; + +import java.util.Arrays; +import java.util.List; +import java.util.TreeSet; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class ValuesFloatGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new ValuesFloatAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "values of floats"; + } + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new LongFloatTupleBlockSourceOperator( + blockFactory, + LongStream.range(0, size).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), randomFloat())) + ); + } + + @Override + public void assertSimpleGroup(List input, Block result, int position, Long group) { + Object[] values = input.stream().flatMap(p -> allFloats(p, group)).collect(Collectors.toSet()).toArray(Object[]::new); + Object resultValue = BlockUtils.toJavaObject(result, position); + switch (values.length) { + case 0 -> assertThat(resultValue, nullValue()); + case 1 -> assertThat(resultValue, equalTo(values[0])); + default -> { + TreeSet set = new TreeSet<>((List) resultValue); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } + } + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java index 46e31b589997a..c60707046a0b1 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java @@ -7,20 +7,20 @@ package org.elasticsearch.compute.aggregation; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.operator.SequenceIntBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.hamcrest.Matchers.containsInAnyOrder; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109932") public class ValuesIntAggregatorFunctionTests extends AggregatorFunctionTestCase { @Override protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @@ -39,7 +39,14 @@ protected String 
expectedDescriptionOfAggregator() { @Override public void assertSimpleOutput(List input, Block result) { - Object[] values = input.stream().flatMapToInt(b -> allInts(b)).boxed().collect(Collectors.toSet()).toArray(Object[]::new); - assertThat((List) BlockUtils.toJavaObject(result, 0), containsInAnyOrder(values)); + TreeSet set = new TreeSet<>((List) BlockUtils.toJavaObject(result, 0)); + Object[] values = input.stream() + .flatMapToInt(AggregatorFunctionTestCase::allInts) + .boxed() + .collect(Collectors.toSet()) + .toArray(Object[]::new); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunctionTests.java index 831e2c1fdfd68..154b076d6a246 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunctionTests.java @@ -15,7 +15,9 @@ import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.Tuple; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -49,7 +51,12 @@ public void assertSimpleGroup(List input, Block result, int position, Long switch (values.length) { case 0 -> assertThat(resultValue, nullValue()); case 1 -> assertThat(resultValue, equalTo(values[0])); - default -> assertThat((List) resultValue, containsInAnyOrder(values)); + default -> { + TreeSet set = new TreeSet<>((List) resultValue); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } + } } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongAggregatorFunctionTests.java index e2a77bed4f4cd..4b01603b3768d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongAggregatorFunctionTests.java @@ -13,7 +13,9 @@ import org.elasticsearch.compute.operator.SequenceLongBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -37,7 +39,14 @@ protected String expectedDescriptionOfAggregator() { @Override public void assertSimpleOutput(List input, Block result) { - Object[] values = input.stream().flatMapToLong(b -> allLongs(b)).boxed().collect(Collectors.toSet()).toArray(Object[]::new); - assertThat((List) BlockUtils.toJavaObject(result, 0), containsInAnyOrder(values)); + TreeSet set = new TreeSet<>((List) BlockUtils.toJavaObject(result, 0)); + Object[] values = input.stream() + .flatMapToLong(AggregatorFunctionTestCase::allLongs) + .boxed() + .collect(Collectors.toSet()) + .toArray(Object[]::new); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } } } diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunctionTests.java index ab667b959c7ae..8259d84d955ef 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunctionTests.java @@ -15,7 +15,9 @@ import org.elasticsearch.compute.operator.TupleBlockSourceOperator; import org.elasticsearch.core.Tuple; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -49,7 +51,12 @@ public void assertSimpleGroup(List input, Block result, int position, Long switch (values.length) { case 0 -> assertThat(resultValue, nullValue()); case 1 -> assertThat(resultValue, equalTo(values[0])); - default -> assertThat((List) resultValue, containsInAnyOrder(values)); + default -> { + TreeSet set = new TreeSet<>((List) resultValue); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } + } } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/FloatBucketedSortTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/FloatBucketedSortTests.java new file mode 100644 index 0000000000000..8b3d288339037 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/FloatBucketedSortTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data.sort; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.search.sort.SortOrder; + +import static org.hamcrest.Matchers.equalTo; + +public class FloatBucketedSortTests extends BucketedSortTestCase { + @Override + protected FloatBucketedSort build(SortOrder sortOrder, int bucketSize) { + return new FloatBucketedSort(bigArrays(), sortOrder, bucketSize); + } + + @Override + protected Object expectedValue(double v) { + return v; + } + + @Override + protected double randomValue() { + return randomFloatBetween(Float.MIN_VALUE, Float.MAX_VALUE, true); + } + + @Override + protected void collect(FloatBucketedSort sort, double value, int bucket) { + sort.collect((float) value, bucket); + } + + @Override + protected void merge(FloatBucketedSort sort, int groupId, FloatBucketedSort other, int otherGroupId) { + sort.merge(groupId, other, otherGroupId); + } + + @Override + protected Block toBlock(FloatBucketedSort sort, BlockFactory blockFactory, IntVector selected) { + return sort.toBlock(blockFactory, selected); + } + + @Override + protected void assertBlockTypeAndValues(Block block, Object... 
values) { + assertThat(block.elementType(), equalTo(ElementType.FLOAT)); + var typedBlock = (FloatBlock) block; + for (int i = 0; i < values.length; i++) { + assertThat((double) typedBlock.getFloat(i), equalTo(values[i])); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index 17d302f198bff..b126ca8af0e31 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -85,12 +84,12 @@ public void testSimple() { // for now we emit at most one time series each page int offset = 0; for (Page page : results) { - assertThat(page.getBlockCount(), equalTo(6)); + assertThat(page.getBlockCount(), equalTo(5)); DocVector docVector = (DocVector) page.getBlock(0).asVector(); BytesRefVector tsidVector = (BytesRefVector) page.getBlock(1).asVector(); LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - LongVector voltageVector = (LongVector) page.getBlock(4).asVector(); - BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(5).asVector(); + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); for (int i = 0; i < page.getPositionCount(); i++) { int expectedTsidOrd = offset / numSamplesPerTS; String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); @@ -115,7 +114,7 @@ public void testLimit() { List results = runDriver(limit, randomIntBetween(1, 1024), randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); assertThat(results, hasSize(1)); Page page = results.get(0); - assertThat(page.getBlockCount(), equalTo(6)); + assertThat(page.getBlockCount(), equalTo(5)); DocVector docVector = (DocVector) page.getBlock(0).asVector(); assertThat(docVector.getPositionCount(), equalTo(limit)); @@ -126,10 +125,10 @@ public void testLimit() { LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); assertThat(timestampVector.getPositionCount(), equalTo(limit)); - LongVector voltageVector = (LongVector) page.getBlock(4).asVector(); + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); assertThat(voltageVector.getPositionCount(), equalTo(limit)); - BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(5).asVector(); + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); assertThat(hostnameVector.getPositionCount(), equalTo(limit)); assertThat(docVector.shards().getInt(0), equalTo(0)); @@ -161,7 +160,6 @@ record Doc(int host, long timestamp, long metric) {} limit, maxPageSize, randomBoolean(), - TimeValue.ZERO, writer -> { Randomness.shuffle(docs); for (Doc doc : docs) { @@ -194,11 +192,11 @@ record Doc(int host, long timestamp, long metric) {} assertThat(page.getPositionCount(), lessThanOrEqualTo(limit)); 
assertThat(page.getPositionCount(), lessThanOrEqualTo(maxPageSize)); } - assertThat(page.getBlockCount(), equalTo(5)); + assertThat(page.getBlockCount(), equalTo(4)); DocVector docVector = (DocVector) page.getBlock(0).asVector(); BytesRefVector tsidVector = (BytesRefVector) page.getBlock(1).asVector(); LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - LongVector metricVector = (LongVector) page.getBlock(4).asVector(); + LongVector metricVector = (LongVector) page.getBlock(3).asVector(); for (int i = 0; i < page.getPositionCount(); i++) { Doc doc = docs.get(offset); offset++; @@ -242,7 +240,6 @@ public void testMatchNone() throws Exception { Integer.MAX_VALUE, randomIntBetween(1, 1024), 1, - TimeValue.ZERO, List.of(ctx), unused -> query ); @@ -264,7 +261,7 @@ public void testMatchNone() throws Exception { @Override protected Operator.OperatorFactory simple() { - return createTimeSeriesSourceOperator(directory, r -> this.reader = r, 1, 1, false, TimeValue.ZERO, writer -> { + return createTimeSeriesSourceOperator(directory, r -> this.reader = r, 1, 1, false, writer -> { long timestamp = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); writeTS(writer, timestamp, new Object[] { "hostname", "host-01" }, new Object[] { "voltage", 2 }); return 1; @@ -289,7 +286,6 @@ List runDriver(int limit, int maxPageSize, boolean forceMerge, int numTime limit, maxPageSize, forceMerge, - TimeValue.ZERO, writer -> { long timestamp = timestampStart; for (int i = 0; i < numSamplesPerTS; i++) { @@ -333,7 +329,6 @@ public static TimeSeriesSortedSourceOperatorFactory createTimeSeriesSourceOperat int limit, int maxPageSize, boolean forceMerge, - TimeValue timeValue, CheckedFunction indexingLogic ) { Sort sort = new Sort( @@ -361,7 +356,7 @@ public static TimeSeriesSortedSourceOperatorFactory createTimeSeriesSourceOperat } var ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); Function queryFunction = c -> new MatchAllDocsQuery(); - return TimeSeriesSortedSourceOperatorFactory.create(limit, maxPageSize, 1, timeValue, List.of(ctx), queryFunction); + return TimeSeriesSortedSourceOperatorFactory.create(limit, maxPageSize, 1, List.of(ctx), queryFunction); } public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimensions, Object[] metrics) throws IOException { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java index 848f3750a4b20..fa72721545ab9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java @@ -31,6 +31,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Set; @@ -353,4 +354,26 @@ public void setThreadPool() { public void shutdownThreadPool() { terminate(threadPool); } + + protected Comparator floatComparator() { + return FloatComparator.INSTANCE; + } + + static final class FloatComparator implements Comparator { + + static final FloatComparator INSTANCE = new FloatComparator(); + + @Override + public int compare(Float o1, Float o2) { + float first = o1; + float second = o2; + if (first < second) { + return -1; + } else if (first == second) { + return 0; 
+            } else {
+                return 1;
+            }
+        }
+    }
 }
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LongFloatTupleBlockSourceOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LongFloatTupleBlockSourceOperator.java
new file mode 100644
index 0000000000000..9276174c9dbb1
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LongFloatTupleBlockSourceOperator.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.operator;
+
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.core.Tuple;
+
+import java.util.List;
+import java.util.stream.Stream;
+
+/**
+ * A source operator whose output is the given tuple values. This operator produces pages
+ * with two Blocks. The returned pages preserve the order of values as given in the initial list.
+ */
+public class LongFloatTupleBlockSourceOperator extends AbstractBlockSourceOperator {
+
+    private static final int DEFAULT_MAX_PAGE_POSITIONS = 8 * 1024;
+
+    private final List<Tuple<Long, Float>> values;
+
+    public LongFloatTupleBlockSourceOperator(BlockFactory blockFactory, Stream<Tuple<Long, Float>> values) {
+        this(blockFactory, values, DEFAULT_MAX_PAGE_POSITIONS);
+    }
+
+    public LongFloatTupleBlockSourceOperator(BlockFactory blockFactory, Stream<Tuple<Long, Float>> values, int maxPagePositions) {
+        super(blockFactory, maxPagePositions);
+        this.values = values.toList();
+    }
+
+    public LongFloatTupleBlockSourceOperator(BlockFactory blockFactory, List<Tuple<Long, Float>> values) {
+        this(blockFactory, values, DEFAULT_MAX_PAGE_POSITIONS);
+    }
+
+    public LongFloatTupleBlockSourceOperator(BlockFactory blockFactory, List<Tuple<Long, Float>> values, int maxPagePositions) {
+        super(blockFactory, maxPagePositions);
+        this.values = values;
+    }
+
+    @Override
+    protected Page createPage(int positionOffset, int length) {
+        var blockBuilder1 = blockFactory.newLongBlockBuilder(length);
+        var blockBuilder2 = blockFactory.newFloatBlockBuilder(length);
+        for (int i = 0; i < length; i++) {
+            Tuple<Long, Float> item = values.get(positionOffset + i);
+            if (item.v1() == null) {
+                blockBuilder1.appendNull();
+            } else {
+                blockBuilder1.appendLong(item.v1());
+            }
+            if (item.v2() == null) {
+                blockBuilder2.appendNull();
+            } else {
+                blockBuilder2.appendFloat(item.v2());
+            }
+        }
+        currentPosition += length;
+        return new Page(blockBuilder1.build(), blockBuilder2.build());
+    }
+
+    @Override
+    protected int remaining() {
+        return values.size() - currentPosition;
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/NullInsertingSourceOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/NullInsertingSourceOperator.java
index 260918396dcd3..c8444551f415b 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/NullInsertingSourceOperator.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/NullInsertingSourceOperator.java
@@ -14,6 +14,7 @@ import org.elasticsearch.compute.data.BytesRefBlock;
 import org.elasticsearch.compute.data.DoubleBlock;
 import org.elasticsearch.compute.data.ElementType;
+import org.elasticsearch.compute.data.FloatBlock;
 import org.elasticsearch.compute.data.IntBlock;
 import
org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; @@ -99,6 +100,9 @@ private void copyValue(Block from, int valueIndex, Block.Builder into) { case DOUBLE: ((DoubleBlock.Builder) into).appendDouble(((DoubleBlock) from).getDouble(valueIndex)); break; + case FLOAT: + ((FloatBlock.Builder) into).appendFloat(((FloatBlock) from).getFloat(valueIndex)); + break; default: throw new IllegalArgumentException("unknown block type " + elementType); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/PositionMergingSourceOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/PositionMergingSourceOperator.java index 4bbd6d0af0c2a..651ec6dc191a9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/PositionMergingSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/PositionMergingSourceOperator.java @@ -13,6 +13,7 @@ import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.FloatBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; @@ -74,6 +75,7 @@ private void copyTo(Block.Builder builder, Block in, int position, int valueCoun switch (in.elementType()) { case BOOLEAN -> ((BooleanBlock.Builder) builder).appendBoolean(((BooleanBlock) in).getBoolean(i)); case BYTES_REF -> ((BytesRefBlock.Builder) builder).appendBytesRef(((BytesRefBlock) in).getBytesRef(i, scratch)); + case FLOAT -> ((FloatBlock.Builder) builder).appendFloat(((FloatBlock) in).getFloat(i)); case DOUBLE -> ((DoubleBlock.Builder) builder).appendDouble(((DoubleBlock) in).getDouble(i)); case INT -> ((IntBlock.Builder) builder).appendInt(((IntBlock) in).getInt(i)); case LONG -> ((LongBlock.Builder) builder).appendLong(((LongBlock) in).getLong(i)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceFloatBlockSourceOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceFloatBlockSourceOperator.java new file mode 100644 index 0000000000000..db524366b381e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceFloatBlockSourceOperator.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.Page; + +import java.util.List; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +/** + * A source operator whose output is the given float values. This operator produces pages + * containing a single Block. The Block contains the float values from the given list, in order. 
+ */
+public class SequenceFloatBlockSourceOperator extends AbstractBlockSourceOperator {
+
+    static final int DEFAULT_MAX_PAGE_POSITIONS = 8 * 1024;
+
+    private final float[] values;
+
+    public SequenceFloatBlockSourceOperator(BlockFactory blockFactory, Stream<Float> values) {
+        this(blockFactory, values, DEFAULT_MAX_PAGE_POSITIONS);
+    }
+
+    public SequenceFloatBlockSourceOperator(BlockFactory blockFactory, Stream<Float> values, int maxPagePositions) {
+        super(blockFactory, maxPagePositions);
+        var l = values.toList();
+        this.values = new float[l.size()];
+        IntStream.range(0, l.size()).forEach(i -> this.values[i] = l.get(i));
+    }
+
+    public SequenceFloatBlockSourceOperator(BlockFactory blockFactory, List<Float> values) {
+        this(blockFactory, values, DEFAULT_MAX_PAGE_POSITIONS);
+    }
+
+    public SequenceFloatBlockSourceOperator(BlockFactory blockFactory, List<Float> values, int maxPagePositions) {
+        super(blockFactory, maxPagePositions);
+        this.values = new float[values.size()];
+        IntStream.range(0, this.values.length).forEach(i -> this.values[i] = values.get(i));
+    }
+
+    @Override
+    protected Page createPage(int positionOffset, int length) {
+        FloatVector.FixedBuilder builder = blockFactory.newFloatVectorFixedBuilder(length);
+        for (int i = 0; i < length; i++) {
+            builder.appendFloat(values[positionOffset + i]);
+        }
+        currentPosition += length;
+        return new Page(builder.build().asBlock());
+    }
+
+    protected int remaining() {
+        return values.length - currentPosition;
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperatorTests.java
index 573c960e86b9c..da1a9c9408f90 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperatorTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperatorTests.java
@@ -11,13 +11,17 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.Rounding;
 import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.compute.aggregation.RateLongAggregatorFunctionSupplier;
 import org.elasticsearch.compute.aggregation.SumDoubleAggregatorFunctionSupplier;
 import org.elasticsearch.compute.aggregation.blockhash.BlockHash;
+import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BlockUtils;
 import org.elasticsearch.compute.data.ElementType;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.data.LongVector;
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.compute.lucene.ValuesSourceReaderOperatorTests;
 import org.elasticsearch.core.IOUtils;
@@ -27,6 +31,7 @@
 import org.junit.After;
 
 import java.io.IOException;
+import java.time.ZoneOffset;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.stream.IntStream;
@@ -203,7 +208,6 @@ record Doc(String pod, String cluster, long timestamp, long requests) {
             Integer.MAX_VALUE,
             between(1, 100),
             randomBoolean(),
-            bucketInterval,
             writer -> {
                 List<Doc> docs = new ArrayList<>();
                 for (Pod pod : pods) {
@@ -227,15 +231,35 @@ record Doc(String pod, String cluster, long timestamp, long requests) {
         );
 
         var ctx = driverContext();
-        List<Operator> extractOperators = new ArrayList<>();
+        List<Operator> intermediateOperators = new
ArrayList<>(); + final Rounding.Prepared rounding = new Rounding.Builder(bucketInterval).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); + var timeBucket = new EvalOperator(ctx.blockFactory(), new EvalOperator.ExpressionEvaluator() { + @Override + public Block eval(Page page) { + LongBlock timestampsBlock = page.getBlock(2); + LongVector timestamps = timestampsBlock.asVector(); + try (var builder = blockFactory().newLongVectorFixedBuilder(timestamps.getPositionCount())) { + for (int i = 0; i < timestamps.getPositionCount(); i++) { + builder.appendLong(rounding.round(timestampsBlock.getLong(i))); + } + return builder.build().asBlock(); + } + } + + @Override + public void close() { + + } + }); + intermediateOperators.add(timeBucket); var rateField = new NumberFieldMapper.NumberFieldType("requests", NumberFieldMapper.NumberType.LONG); Operator extractRate = (ValuesSourceReaderOperatorTests.factory(reader, rateField, ElementType.LONG).get(ctx)); - extractOperators.add(extractRate); + intermediateOperators.add(extractRate); List nonBucketGroupings = new ArrayList<>(groupings); nonBucketGroupings.remove("bucket"); for (String grouping : nonBucketGroupings) { var groupingField = new KeywordFieldMapper.KeywordFieldType(grouping); - extractOperators.add(ValuesSourceReaderOperatorTests.factory(reader, groupingField, ElementType.BYTES_REF).get(ctx)); + intermediateOperators.add(ValuesSourceReaderOperatorTests.factory(reader, groupingField, ElementType.BYTES_REF).get(ctx)); } // _doc, tsid, timestamp, bucket, requests, grouping1, grouping2 Operator intialAgg = new TimeSeriesAggregationOperatorFactories.Initial( @@ -278,7 +302,7 @@ record Doc(String pod, String cluster, long timestamp, long requests) { new Driver( ctx, sourceOperatorFactory.get(ctx), - CollectionUtils.concatLists(extractOperators, List.of(intialAgg, intermediateAgg, finalAgg)), + CollectionUtils.concatLists(intermediateOperators, List.of(intialAgg, intermediateAgg, finalAgg)), new TestResultPageSinkOperator(results::add), () -> {} ) diff --git a/x-pack/plugin/esql/qa/testFixtures/build.gradle b/x-pack/plugin/esql/qa/testFixtures/build.gradle index 520873a6cb03e..e8a95011100f5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/build.gradle +++ b/x-pack/plugin/esql/qa/testFixtures/build.gradle @@ -1,9 +1,9 @@ apply plugin: 'elasticsearch.java' - +apply plugin: org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin dependencies { implementation project(':x-pack:plugin:esql:compute') - compileOnly project(':x-pack:plugin:esql') + implementation project(':x-pack:plugin:esql') compileOnly project(path: xpackModule('core')) implementation project(":libs:elasticsearch-x-content") implementation project(':client:rest') @@ -11,7 +11,14 @@ dependencies { implementation project(':test:framework') api(testArtifact(project(xpackModule('esql-core')))) implementation project(':server') - api "net.sf.supercsv:super-csv:${versions.supercsv}" + implementation "net.sf.supercsv:super-csv:${versions.supercsv}" +} + +/** + * This is needed for CsvTestsDataLoaderTests to reflect the classpath that CsvTestsDataLoader actually uses when "main" method is executed. 
+ */ +tasks.named("test").configure { + classpath = classpath - (configurations.resolveableCompileOnly - configurations.runtimeClasspath) } /** diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec index 377d6d6678032..35e1101becbf9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec @@ -1,5 +1,5 @@ keywordByInt -required_capability: lookup_command +required_capability: tables_types FROM employees | SORT emp_no | LIMIT 4 @@ -17,7 +17,7 @@ emp_no:integer | languages:integer | lang_name:keyword ; keywordByMvInt -required_capability: lookup_command +required_capability: tables_types ROW int=[1, 2, 3] | LOOKUP int_number_names ON int ; @@ -27,7 +27,7 @@ int:integer | name:keyword ; keywordByDupeInt -required_capability: lookup_command +required_capability: tables_types ROW int=[1, 1, 1] | LOOKUP int_number_names ON int ; @@ -37,7 +37,7 @@ int:integer | name:keyword ; intByKeyword -required_capability: lookup_command +required_capability: tables_types ROW name="two" | LOOKUP int_number_names ON name ; @@ -48,7 +48,7 @@ name:keyword | int:integer keywordByLong -required_capability: lookup_command +required_capability: tables_types FROM employees | SORT emp_no | LIMIT 4 @@ -66,7 +66,7 @@ emp_no:integer | languages:long | lang_name:keyword ; longByKeyword -required_capability: lookup_command +required_capability: tables_types ROW name="two" | LOOKUP long_number_names ON name ; @@ -76,7 +76,7 @@ name:keyword | long:long ; keywordByFloat -required_capability: lookup_command +required_capability: tables_types FROM employees | SORT emp_no | LIMIT 4 @@ -94,7 +94,7 @@ emp_no:integer | height:double | height_name:keyword ; floatByKeyword -required_capability: lookup_command +required_capability: tables_types ROW name="two point zero eight" | LOOKUP double_number_names ON name ; @@ -104,7 +104,7 @@ two point zero eight | 2.08 ; floatByNullMissing -required_capability: lookup_command +required_capability: tables_types ROW name=null | LOOKUP double_number_names ON name ; @@ -114,7 +114,7 @@ name:null | double:double ; floatByNullMatching -required_capability: lookup_command +required_capability: tables_types ROW name=null | LOOKUP double_number_names_with_null ON name ; @@ -124,7 +124,7 @@ name:null | double:double ; intIntByKeywordKeyword -required_capability: lookup_command +required_capability: tables_types ROW aa="foo", ab="zoo" | LOOKUP big ON aa, ab ; @@ -134,7 +134,7 @@ foo | zoo | 1 | -1 ; intIntByKeywordKeywordMissing -required_capability: lookup_command +required_capability: tables_types ROW aa="foo", ab="zoi" | LOOKUP big ON aa, ab ; @@ -144,7 +144,7 @@ foo | zoi | null | null ; intIntByKeywordKeywordSameValues -required_capability: lookup_command +required_capability: tables_types ROW aa="foo", ab="foo" | LOOKUP big ON aa, ab ; @@ -154,7 +154,7 @@ foo | foo | 2 | -2 ; intIntByKeywordKeywordSameValuesMissing -required_capability: lookup_command +required_capability: tables_types ROW aa="bar", ab="bar" | LOOKUP big ON aa, ab ; @@ -164,7 +164,7 @@ bar | bar | null | null ; lookupBeforeStats -required_capability: lookup_command +required_capability: tables_types FROM employees | RENAME languages AS int | LOOKUP int_number_names ON int @@ -182,7 +182,7 @@ height:double | languages:keyword ; lookupAfterStats -required_capability: lookup_command +required_capability: tables_types FROM employees | 
STATS int=TO_INT(AVG(height)) | LOOKUP int_number_names ON int @@ -194,7 +194,7 @@ two // Makes sure the LOOKUP squashes previous names doesNotDuplicateNames -required_capability: lookup_command +required_capability: tables_types FROM employees | SORT emp_no | LIMIT 4 @@ -213,7 +213,7 @@ emp_no:integer | languages:long | name:keyword ; lookupBeforeSort -required_capability: lookup_command +required_capability: tables_types FROM employees | WHERE emp_no < 10005 | RENAME languages AS int @@ -231,7 +231,7 @@ languages:keyword | emp_no:integer ; lookupAfterSort -required_capability: lookup_command +required_capability: tables_types FROM employees | WHERE emp_no < 10005 | SORT languages ASC, emp_no ASC @@ -253,7 +253,7 @@ languages:keyword | emp_no:integer // named "lookup" // rowNamedLookup -required_capability: lookup_command +required_capability: tables_types ROW lookup = "a" ; @@ -262,7 +262,7 @@ lookup:keyword ; rowNamedLOOKUP -required_capability: lookup_command +required_capability: tables_types ROW LOOKUP = "a" ; @@ -271,7 +271,7 @@ LOOKUP:keyword ; evalNamedLookup -required_capability: lookup_command +required_capability: tables_types ROW a = "a" | EVAL lookup = CONCAT(a, "1") ; @@ -280,7 +280,7 @@ a:keyword | lookup:keyword ; dissectNamedLookup -required_capability: lookup_command +required_capability: tables_types row a = "foo bar" | dissect a "foo %{lookup}"; a:keyword | lookup:keyword @@ -288,7 +288,7 @@ a:keyword | lookup:keyword ; renameIntoLookup -required_capability: lookup_command +required_capability: tables_types row a = "foo bar" | RENAME a AS lookup; lookup:keyword @@ -296,7 +296,7 @@ lookup:keyword ; sortOnLookup -required_capability: lookup_command +required_capability: tables_types ROW lookup = "a" | SORT lookup ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/test/java/org/elasticsearch/xpack/esql/CsvTestsDataLoaderTests.java b/x-pack/plugin/esql/qa/testFixtures/src/test/java/org/elasticsearch/xpack/esql/CsvTestsDataLoaderTests.java new file mode 100644 index 0000000000000..5b40e1d03e92f --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/test/java/org/elasticsearch/xpack/esql/CsvTestsDataLoaderTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql; + +import org.elasticsearch.test.ESTestCase; + +import java.net.ConnectException; + +import static org.hamcrest.Matchers.startsWith; + +public class CsvTestsDataLoaderTests extends ESTestCase { + + public void testCsvTestsDataLoaderExecution() { + ConnectException ce = expectThrows(ConnectException.class, () -> CsvTestsDataLoader.main(new String[] {})); + assertThat(ce.getMessage(), startsWith("Connection refused")); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 654c1ffd8a5e9..9b759a49eab4e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -14,6 +14,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.Set; /** @@ -22,88 +23,90 @@ * {@link RestNodesCapabilitiesAction} and we use them to enable tests. */ public class EsqlCapabilities { - /** - * Support for function {@code CBRT}. Done in #108574. - */ - private static final String FN_CBRT = "fn_cbrt"; + public enum Cap { + /** + * Support for function {@code CBRT}. Done in #108574. + */ + FN_CBRT, - /** - * Support for {@code MV_APPEND} function. #107001 - */ - private static final String FN_MV_APPEND = "fn_mv_append"; + /** + * Support for {@code MV_APPEND} function. #107001 + */ + FN_MV_APPEND, - /** - * Support for function {@code IP_PREFIX}. - */ - private static final String FN_IP_PREFIX = "fn_ip_prefix"; + /** + * Support for function {@code IP_PREFIX}. + */ + FN_IP_PREFIX, - /** - * Fix on function {@code SUBSTRING} that makes it not return null on empty strings. - */ - private static final String FN_SUBSTRING_EMPTY_NULL = "fn_substring_empty_null"; + /** + * Fix on function {@code SUBSTRING} that makes it not return null on empty strings. + */ + FN_SUBSTRING_EMPTY_NULL, - /** - * Support for aggregation function {@code TOP_LIST}. - */ - private static final String AGG_TOP_LIST = "agg_top_list"; + /** + * Support for aggregation function {@code TOP_LIST}. + */ + AGG_TOP_LIST, - /** - * Optimization for ST_CENTROID changed some results in cartesian data. #108713 - */ - private static final String ST_CENTROID_AGG_OPTIMIZED = "st_centroid_agg_optimized"; + /** + * Optimization for ST_CENTROID changed some results in cartesian data. #108713 + */ + ST_CENTROID_AGG_OPTIMIZED, - /** - * Support for requesting the "_ignored" metadata field. - */ - private static final String METADATA_IGNORED_FIELD = "metadata_field_ignored"; + /** + * Support for requesting the "_ignored" metadata field. + */ + METADATA_IGNORED_FIELD, - /** - * Support for the "LOOKUP" command. - */ - private static final String LOOKUP_COMMAND = "lookup_command"; + /** + * Support for the syntax {@code "tables": {"type": []}}. + */ + TABLES_TYPES(true), - /** - * Support for the syntax {@code "tables": {"type": []}}. - */ - private static final String TABLES_TYPES = "tables_types"; + /** + * Support for requesting the "REPEAT" command. + */ + REPEAT, - /** - * Support for requesting the "REPEAT" command. - */ - private static final String REPEAT = "repeat"; + /** + * Cast string literals to datetime in addition and subtraction when the other side is a date or time interval. 
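+ * For example (illustrative only), {@code "2024-01-01" + 1 day} is evaluated as a datetime plus a temporal amount instead of failing to resolve.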
+ */ + STRING_LITERAL_AUTO_CASTING_TO_DATETIME_ADD_SUB, - /** - * Cast string literals to datetime in addition and subtraction when the other side is a date or time interval. - */ - public static final String STRING_LITERAL_AUTO_CASTING_TO_DATETIME_ADD_SUB = "string_literal_auto_casting_to_datetime_add_sub"; + /** + * Support for named or positional parameters in EsqlQueryRequest. + */ + NAMED_POSITIONAL_PARAMETER, - /** - * Support multiple field mappings if appropriate conversion function is used (union types) - */ - public static final String UNION_TYPES = "union_types"; + /** + * Support multiple field mappings if appropriate conversion function is used (union types) + */ + UNION_TYPES; - /** - * Support for named or positional parameters in EsqlQueryRequest. - */ - private static final String NAMED_POSITIONAL_PARAMETER = "named_positional_parameter"; + Cap() { + snapshotOnly = false; + } + + Cap(boolean snapshotOnly) { + this.snapshotOnly = snapshotOnly; + } + + public String capabilityName() { + return name().toLowerCase(Locale.ROOT); + } + + private final boolean snapshotOnly; + } public static final Set<String> CAPABILITIES = capabilities(); private static Set<String> capabilities() { List<String> caps = new ArrayList<>(); - caps.add(FN_CBRT); - caps.add(FN_IP_PREFIX); - caps.add(FN_SUBSTRING_EMPTY_NULL); - caps.add(AGG_TOP_LIST); - caps.add(ST_CENTROID_AGG_OPTIMIZED); - caps.add(METADATA_IGNORED_FIELD); - caps.add(FN_MV_APPEND); - caps.add(REPEAT); - caps.add(UNION_TYPES); - caps.add(NAMED_POSITIONAL_PARAMETER); - - if (Build.current().isSnapshot()) { - caps.add(LOOKUP_COMMAND); + for (Cap cap : Cap.values()) { + if (Build.current().isSnapshot() || cap.snapshotOnly == false) { + caps.add(cap.capabilityName()); + } } /* @@ -115,7 +118,6 @@ private static Set<String> capabilities() { for (NodeFeature feature : new EsqlFeatures().getHistoricalFeatures().keySet()) { caps.add(cap(feature)); } - caps.add(STRING_LITERAL_AUTO_CASTING_TO_DATETIME_ADD_SUB); return Set.copyOf(caps); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java index 4f991af54ecff..17934c1729ad7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java @@ -7,10 +7,25 @@ package org.elasticsearch.xpack.esql.expression.function.scalar; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateDiff; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; +import
org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.Now; +import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals; import java.util.List; @@ -25,6 +40,24 @@ *

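 * <p>A sketch of how these entries are consumed (the wiring appears later in this diff, in PlanNamedTypes): each {@code NamedWriteableRegistry.Entry} maps the name returned by {@code getWriteableName()} to a {@code StreamInput} reader, so deserialization can dispatch via {@code in.readNamedWriteable(Expression.class)} and recover, for example, a {@code Case} node.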
*/ public abstract class EsqlScalarFunction extends ScalarFunction implements EvaluatorMapper { + public static List getNamedWriteables() { + return List.of( + Case.ENTRY, + Coalesce.ENTRY, + Concat.ENTRY, + Greatest.ENTRY, + InsensitiveEquals.ENTRY, + DateExtract.ENTRY, + DateDiff.ENTRY, + DateFormat.ENTRY, + DateParse.ENTRY, + DateTrunc.ENTRY, + Least.ENTRY, + Now.ENTRY, + ToLower.ENTRY, + ToUpper.ENTRY + ); + } protected EsqlScalarFunction(Source source) { super(source); @@ -38,5 +71,4 @@ protected EsqlScalarFunction(Source source, List fields) { public Object fold() { return EvaluatorMapper.super.fold(); } - } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index 0866f97b67724..eb2e5ab94487f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -12,6 +12,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FromBase64; @@ -76,10 +79,13 @@ public static List getNamedWriteables() { Cosh.ENTRY, Floor.ENTRY, FromBase64.ENTRY, + IsNotNull.ENTRY, + IsNull.ENTRY, Length.ENTRY, Log10.ENTRY, LTrim.ENTRY, Neg.ENTRY, + Not.ENTRY, RTrim.ENTRY, Signum.ENTRY, Sin.ENTRY, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index f98f5c45acd16..50d0e5484756e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.conditional; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.ElementType; @@ -27,8 +30,11 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -37,8 +43,12 @@ import static 
org.elasticsearch.common.logging.LoggerMessageFormat.format; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; public final class Case extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Case", Case::new); + record Condition(Expression condition, Expression value) {} private final List conditions; @@ -110,6 +120,26 @@ public Case( elseValue = elseValueIsExplicit() ? children().get(children().size() - 1) : new Literal(source, null, NULL); } + private Case(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(children().get(0)); + out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + private boolean elseValueIsExplicit() { return children().size() % 2 == 1; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java index 8062019b4c51c..580e2f9900208 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.conditional; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; @@ -23,17 +26,24 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import java.io.IOException; import java.util.List; import java.util.function.Function; import java.util.stream.Stream; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; /** * Returns the maximum value of multiple columns. 
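 * <p>For example (an illustrative ES|QL snippet, not part of this change): {@code ROW a = 10, b = 20 | EVAL g = GREATEST(a, b)} returns {@code 20}.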
*/ public class Greatest extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Greatest", Greatest::new); + private DataType dataType; @FunctionInfo( @@ -61,6 +71,26 @@ public Greatest( super(source, Stream.concat(Stream.of(first), rest.stream()).toList()); } + private Greatest(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(children().get(0)); + out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { if (dataType == null) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java index f983e0125a4db..2255fed9d4947 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.conditional; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; @@ -23,17 +26,24 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import java.io.IOException; import java.util.List; import java.util.function.Function; import java.util.stream.Stream; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; /** * Returns the minimum value of multiple columns. 
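 * <p>For example (an illustrative ES|QL snippet, not part of this change): {@code ROW a = 10, b = 20 | EVAL l = LEAST(a, b)} returns {@code 10}.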
*/ public class Least extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Least", Least::new); + private DataType dataType; @FunctionInfo( @@ -59,6 +69,26 @@ public Least( super(source, Stream.concat(Stream.of(first), rest.stream()).toList()); } + private Least(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(children().get(0)); + out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { if (dataType == null) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java index 42e20a9a4615e..2a224598253f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; @@ -20,7 +23,10 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import java.io.IOException; import java.time.Instant; import java.time.ZoneId; import java.time.ZonedDateTime; @@ -47,6 +53,7 @@ * If the second argument (start) is greater than the third argument (end), then negative values are returned. 
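 * <p>For example (an illustrative ES|QL snippet, not part of this change): {@code ROW d1 = TO_DATETIME("2023-12-02T11:00:00Z"), d2 = TO_DATETIME("2023-12-12T11:00:00Z") | EVAL dd = DATE_DIFF("days", d1, d2)} returns {@code 10}.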
*/ public class DateDiff extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "DateDiff", DateDiff::new); public static final ZoneId UTC = ZoneId.of("Z"); @@ -166,6 +173,40 @@ public DateDiff( this.endTimestamp = endTimestamp; } + private DateDiff(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + ((PlanStreamInput) in).readExpression(), + ((PlanStreamInput) in).readExpression() + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + ((PlanStreamOutput) out).writeExpression(unit); + ((PlanStreamOutput) out).writeExpression(startTimestamp); + ((PlanStreamOutput) out).writeExpression(endTimestamp); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + Expression unit() { + return unit; + } + + Expression startTimestamp() { + return startTimestamp; + } + + Expression endTimestamp() { + return endTimestamp; + } + @Evaluator(extraName = "Constant", warnExceptions = { IllegalArgumentException.class, InvalidArgumentException.class }) static int process(@Fixed Part datePartFieldUnit, long startTimestamp, long endTimestamp) throws IllegalArgumentException { ZonedDateTime zdtStart = ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTimestamp), UTC); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java index c28c5e417c152..f3448a2b7c5ff 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; @@ -22,8 +25,11 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlConfigurationFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import java.io.IOException; import java.time.ZoneId; import java.time.temporal.ChronoField; import java.util.List; @@ -35,6 +41,11 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.chronoToLong; public class DateExtract extends EsqlConfigurationFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "DateExtract", + DateExtract::new + ); private ChronoField chronoField; @@ -69,6 +80,35 @@ public DateExtract( super(source, List.of(chronoFieldExp, field), configuration); } + private DateExtract(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + 
((PlanStreamInput) in).readExpression(), + ((PlanStreamInput) in).configuration() + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(datePart()); + ((PlanStreamOutput) out).writeExpression(field()); + } + + Expression datePart() { + return children().get(0); + } + + Expression field() { + return children().get(1); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { var fieldEvaluator = toEvaluator.apply(children().get(1)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java index bcc5d7cb16050..9a789c2bb6fb2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; @@ -22,9 +25,12 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlConfigurationFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import java.io.IOException; import java.util.List; import java.util.Locale; import java.util.function.Function; @@ -37,6 +43,11 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToString; public class DateFormat extends EsqlConfigurationFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "DateFormat", + DateFormat::new + ); private final Expression field; private final Expression format; @@ -59,6 +70,35 @@ Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.S this.format = date != null ? format : null; } + private DateFormat(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression()), + ((PlanStreamInput) in).configuration() + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(children().get(0)); + out.writeOptionalWriteable(children().size() == 1 ? 
null : o -> ((PlanStreamOutput) o).writeExpression(children().get(1))); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + Expression field() { + return field; + } + + Expression format() { + return format; + } + @Override public DataType dataType() { return DataType.KEYWORD; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java index d68664afe8418..12ffe092287ed 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; @@ -22,8 +25,11 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import java.io.IOException; import java.time.ZoneId; import java.util.List; import java.util.function.Function; @@ -38,6 +44,11 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.dateTimeToLong; public class DateParse extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "DateParse", + DateParse::new + ); private final Expression field; private final Expression format; @@ -64,6 +75,26 @@ public DateParse( this.format = second != null ? first : null; } + private DateParse(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression()) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(children().get(0)); + out.writeOptionalWriteable(children().size() == 2 ? 
o -> ((PlanStreamOutput) out).writeExpression(children().get(1)) : null); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return DataType.DATETIME; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java index ddd51d281105d..995e525dda9ec 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; import org.elasticsearch.common.Rounding; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; @@ -20,8 +23,11 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import java.io.IOException; import java.time.Duration; import java.time.Period; import java.time.ZoneId; @@ -36,6 +42,12 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; public class DateTrunc extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "DateTrunc", + DateTrunc::new + ); + private final Expression interval; private final Expression timestampField; protected static final ZoneId DEFAULT_TZ = ZoneOffset.UTC; @@ -69,6 +81,30 @@ public DateTrunc( this.timestampField = field; } + private DateTrunc(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression(), ((PlanStreamInput) in).readExpression()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(interval); + ((PlanStreamOutput) out).writeExpression(timestampField); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + Expression interval() { + return interval; + } + + Expression field() { + return timestampField; + } + @Override protected TypeResolution resolveType() { if (childrenResolved() == false) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/Now.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/Now.java index fe54cfd186fec..0f401e3de8045 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/Now.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/Now.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; 
+import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; @@ -18,11 +21,14 @@ import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlConfigurationFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.List; import java.util.function.Function; public class Now extends EsqlConfigurationFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Now", Now::new); private final long now; @@ -38,13 +44,18 @@ public Now(Source source, Configuration configuration) { this.now = configuration.now() == null ? System.currentTimeMillis() : configuration.now().toInstant().toEpochMilli(); } - private Now(Source source, long now) { - super(source, List.of(), null); - this.now = now; + private Now(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).configuration()); } - public static Now newInstance(Source source, long now) { - return new Now(source, now); + @Override + public void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + } + + @Override + public String getWriteableName() { + return ENTRY.name; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index ff7cd83eedbe2..6a02eb4b94f12 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.nulls; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.Page; @@ -27,19 +30,26 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import java.io.IOException; import java.util.List; import java.util.function.Function; import java.util.stream.IntStream; import java.util.stream.Stream; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; /** * Function returning the first non-null value. 
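 * <p>For example (an illustrative ES|QL snippet, not part of this change): {@code ROW a = null, b = "b" | EVAL c = COALESCE(a, b)} returns {@code "b"} for {@code c}.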
*/ public class Coalesce extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Coalesce", Coalesce::new); + private DataType dataType; @FunctionInfo( @@ -100,6 +110,26 @@ public Coalesce( super(source, Stream.concat(Stream.of(first), rest.stream()).toList()); } + private Coalesce(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(children().get(0)); + out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { if (dataType == null) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java index d01edbe7024e8..69464787f9288 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; @@ -22,7 +25,10 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import java.io.IOException; import java.util.List; import java.util.function.Function; import java.util.stream.Stream; @@ -30,11 +36,14 @@ import static org.elasticsearch.common.unit.ByteSizeUnit.MB; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; +import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; /** * Join strings. 
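 * <p>For example (an illustrative ES|QL snippet, not part of this change): {@code ROW first = "John", last = "Doe" | EVAL name = CONCAT(first, " ", last)} returns {@code "John Doe"}.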
*/ public class Concat extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Concat", Concat::new); static final long MAX_CONCAT_LENGTH = MB.toBytes(1); @@ -51,6 +60,26 @@ public Concat( super(source, Stream.concat(Stream.of(first), rest.stream()).toList()); } + private Concat(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + ((PlanStreamInput) in).readExpression(), + in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + ((PlanStreamOutput) out).writeExpression(children().get(0)); + out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return DataType.KEYWORD; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java index f14df4f56929a..aadb0b3ac7886 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; @@ -17,12 +20,15 @@ import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlConfigurationFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import java.io.IOException; import java.util.List; import java.util.Locale; import java.util.function.Function; @@ -31,6 +37,7 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; public class ToLower extends EsqlConfigurationFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "ToLower", ToLower::new); private final Expression field; @@ -52,6 +59,20 @@ public ToLower( this.field = field; } + private ToLower(StreamInput in) throws IOException { + this(Source.EMPTY, ((PlanStreamInput) in).readExpression(), ((PlanStreamInput) in).configuration()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ((PlanStreamOutput) out).writeExpression(field()); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return 
field.dataType(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java index 6c903b4bfddeb..398fe1c76a49f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; @@ -17,12 +20,15 @@ import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlConfigurationFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import java.io.IOException; import java.util.List; import java.util.Locale; import java.util.function.Function; @@ -31,6 +37,7 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; public class ToUpper extends EsqlConfigurationFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "ToUpper", ToUpper::new); private final Expression field; @@ -52,6 +59,20 @@ public ToUpper( this.field = field; } + private ToUpper(StreamInput in) throws IOException { + this(Source.EMPTY, ((PlanStreamInput) in).readExpression(), ((PlanStreamInput) in).configuration()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ((PlanStreamOutput) out).writeExpression(field()); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return field.dataType(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 831d105a89076..74e8661596e41 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -62,16 +62,8 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; -import 
org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; -import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateDiff; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.Now; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.IpPrefix; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan2; @@ -82,13 +74,11 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tau; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialContains; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Locate; @@ -99,13 +89,10 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split; import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper; import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Dissect.Parser; @@ -246,27 +233,12 @@ public static List namedTypeEntries() { // BinaryLogic of(BinaryLogic.class, And.class, PlanNamedTypes::writeBinaryLogic, PlanNamedTypes::readBinaryLogic), of(BinaryLogic.class, Or.class, PlanNamedTypes::writeBinaryLogic, PlanNamedTypes::readBinaryLogic), - // UnaryScalarFunction - of(QL_UNARY_SCLR_CLS, IsNotNull.class, PlanNamedTypes::writeQLUnaryScalar, PlanNamedTypes::readQLUnaryScalar), - of(QL_UNARY_SCLR_CLS, IsNull.class, PlanNamedTypes::writeQLUnaryScalar, 
PlanNamedTypes::readQLUnaryScalar), - of(QL_UNARY_SCLR_CLS, Not.class, PlanNamedTypes::writeQLUnaryScalar, PlanNamedTypes::readQLUnaryScalar), // ScalarFunction of(ScalarFunction.class, Atan2.class, PlanNamedTypes::writeAtan2, PlanNamedTypes::readAtan2), - of(ScalarFunction.class, Case.class, PlanNamedTypes::writeVararg, PlanNamedTypes::readVarag), of(ScalarFunction.class, CIDRMatch.class, PlanNamedTypes::writeCIDRMatch, PlanNamedTypes::readCIDRMatch), - of(ScalarFunction.class, Coalesce.class, PlanNamedTypes::writeVararg, PlanNamedTypes::readVarag), - of(ScalarFunction.class, Concat.class, PlanNamedTypes::writeVararg, PlanNamedTypes::readVarag), - of(ScalarFunction.class, DateDiff.class, PlanNamedTypes::writeDateDiff, PlanNamedTypes::readDateDiff), - of(ScalarFunction.class, DateExtract.class, PlanNamedTypes::writeDateExtract, PlanNamedTypes::readDateExtract), - of(ScalarFunction.class, DateFormat.class, PlanNamedTypes::writeDateFormat, PlanNamedTypes::readDateFormat), - of(ScalarFunction.class, DateParse.class, PlanNamedTypes::writeDateTimeParse, PlanNamedTypes::readDateTimeParse), - of(ScalarFunction.class, DateTrunc.class, PlanNamedTypes::writeDateTrunc, PlanNamedTypes::readDateTrunc), of(ScalarFunction.class, E.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar), - of(ScalarFunction.class, Greatest.class, PlanNamedTypes::writeVararg, PlanNamedTypes::readVarag), of(ScalarFunction.class, IpPrefix.class, (out, prefix) -> prefix.writeTo(out), IpPrefix::readFrom), - of(ScalarFunction.class, Least.class, PlanNamedTypes::writeVararg, PlanNamedTypes::readVarag), of(ScalarFunction.class, Log.class, PlanNamedTypes::writeLog, PlanNamedTypes::readLog), - of(ScalarFunction.class, Now.class, PlanNamedTypes::writeNow, PlanNamedTypes::readNow), of(ScalarFunction.class, Pi.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar), of(ScalarFunction.class, Round.class, PlanNamedTypes::writeRound, PlanNamedTypes::readRound), of(ScalarFunction.class, Pow.class, PlanNamedTypes::writePow, PlanNamedTypes::readPow), @@ -284,8 +256,6 @@ public static List namedTypeEntries() { of(ScalarFunction.class, Split.class, PlanNamedTypes::writeSplit, PlanNamedTypes::readSplit), of(ScalarFunction.class, Tau.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar), of(ScalarFunction.class, Replace.class, PlanNamedTypes::writeReplace, PlanNamedTypes::readReplace), - of(ScalarFunction.class, ToLower.class, PlanNamedTypes::writeToLower, PlanNamedTypes::readToLower), - of(ScalarFunction.class, ToUpper.class, PlanNamedTypes::writeToUpper, PlanNamedTypes::readToUpper), // GroupingFunctions of(GroupingFunction.class, Bucket.class, PlanNamedTypes::writeBucket, PlanNamedTypes::readBucket), // AggregateFunctions @@ -309,10 +279,11 @@ public static List namedTypeEntries() { AbstractMultivalueFunction.getNamedWriteables(), EsqlArithmeticOperation.getNamedWriteables(), EsqlBinaryComparison.getNamedWriteables(), + EsqlScalarFunction.getNamedWriteables(), FullTextPredicate.getNamedWriteables(), NamedExpression.getNamedWriteables(), UnaryScalarFunction.getNamedWriteables(), - List.of(UnsupportedAttribute.ENTRY, InsensitiveEquals.ENTRY, Literal.ENTRY, org.elasticsearch.xpack.esql.expression.Order.ENTRY) + List.of(UnsupportedAttribute.ENTRY, Literal.ENTRY, org.elasticsearch.xpack.esql.expression.Order.ENTRY) )) { for (NamedWriteableRegistry.Entry e : ee) { entries.add(of(Expression.class, e)); @@ -1081,32 +1052,6 @@ static void writeBucket(PlanStreamOutput out, Bucket bucket) throws 
IOException out.writeOptionalExpression(bucket.to()); } - static final Map<String, TriFunction<Source, Expression, List<Expression>, ScalarFunction>> VARARG_CTORS = Map.ofEntries( - entry(name(Case.class), Case::new), - entry(name(Coalesce.class), Coalesce::new), - entry(name(Concat.class), Concat::new), - entry(name(Greatest.class), Greatest::new), - entry(name(Least.class), Least::new) - ); - - static ScalarFunction readVarag(PlanStreamInput in, String name) throws IOException { - return VARARG_CTORS.get(name) - .apply( - Source.readFrom(in), - in.readExpression(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) - ); - } - - static void writeVararg(PlanStreamOutput out, ScalarFunction vararg) throws IOException { - vararg.source().writeTo(out); - out.writeExpression(vararg.children().get(0)); - out.writeCollection( - vararg.children().subList(1, vararg.children().size()), - writerFromPlanWriter(PlanStreamOutput::writeExpression) - ); - } - static CountDistinct readCountDistinct(PlanStreamInput in) throws IOException { return new CountDistinct(Source.readFrom(in), in.readExpression(), in.readOptionalNamed(Expression.class)); } @@ -1119,67 +1064,6 @@ static void writeCountDistinct(PlanStreamOutput out, CountDistinct countDistinct out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null); } - static DateDiff readDateDiff(PlanStreamInput in) throws IOException { - return new DateDiff(Source.readFrom(in), in.readExpression(), in.readExpression(), in.readExpression()); - } - - static void writeDateDiff(PlanStreamOutput out, DateDiff function) throws IOException { - Source.EMPTY.writeTo(out); - List<Expression> fields = function.children(); - assert fields.size() == 3; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - out.writeExpression(fields.get(2)); - } - - static DateExtract readDateExtract(PlanStreamInput in) throws IOException { - return new DateExtract(Source.readFrom(in), in.readExpression(), in.readExpression(), in.configuration()); - } - - static void writeDateExtract(PlanStreamOutput out, DateExtract function) throws IOException { - function.source().writeTo(out); - List<Expression> fields = function.children(); - assert fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - } - - static DateFormat readDateFormat(PlanStreamInput in) throws IOException { - return new DateFormat(Source.readFrom(in), in.readExpression(), in.readOptionalNamed(Expression.class), in.configuration()); - } - - static void writeDateFormat(PlanStreamOutput out, DateFormat dateFormat) throws IOException { - dateFormat.source().writeTo(out); - List<Expression> fields = dateFormat.children(); - assert fields.size() == 1 || fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null); - } - - static DateParse readDateTimeParse(PlanStreamInput in) throws IOException { - return new DateParse(Source.readFrom(in), in.readExpression(), in.readOptionalNamed(Expression.class)); - } - - static void writeDateTimeParse(PlanStreamOutput out, DateParse function) throws IOException { - function.source().writeTo(out); - List<Expression> fields = function.children(); - assert fields.size() == 1 || fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeOptionalWriteable(fields.size() == 2 ?
o -> out.writeExpression(fields.get(1)) : null); - } - - static DateTrunc readDateTrunc(PlanStreamInput in) throws IOException { - return new DateTrunc(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writeDateTrunc(PlanStreamOutput out, DateTrunc dateTrunc) throws IOException { - dateTrunc.source().writeTo(out); - List fields = dateTrunc.children(); - assert fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - } - static SpatialIntersects readIntersects(PlanStreamInput in) throws IOException { return new SpatialIntersects(Source.EMPTY, in.readExpression(), in.readExpression()); } @@ -1201,14 +1085,6 @@ static void writeSpatialRelatesFunction(PlanStreamOutput out, SpatialRelatesFunc out.writeExpression(spatialRelatesFunction.right()); } - static Now readNow(PlanStreamInput in) throws IOException { - return new Now(Source.readFrom(in), in.configuration()); - } - - static void writeNow(PlanStreamOutput out, Now function) throws IOException { - Source.EMPTY.writeTo(out); - } - static Round readRound(PlanStreamInput in) throws IOException { return new Round(Source.readFrom(in), in.readExpression(), in.readOptionalNamed(Expression.class)); } @@ -1303,22 +1179,6 @@ static void writeReplace(PlanStreamOutput out, Replace replace) throws IOExcepti out.writeExpression(fields.get(2)); } - static ToLower readToLower(PlanStreamInput in) throws IOException { - return new ToLower(Source.EMPTY, in.readExpression(), in.configuration()); - } - - static void writeToLower(PlanStreamOutput out, ToLower toLower) throws IOException { - out.writeExpression(toLower.field()); - } - - static ToUpper readToUpper(PlanStreamInput in) throws IOException { - return new ToUpper(Source.EMPTY, in.readExpression(), in.configuration()); - } - - static void writeToUpper(PlanStreamOutput out, ToUpper toUpper) throws IOException { - out.writeExpression(toUpper.field()); - } - static Left readLeft(PlanStreamInput in) throws IOException { return new Left(Source.readFrom(in), in.readExpression(), in.readExpression()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java index fc43f1002d112..13773ca61f8d8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java @@ -31,7 +31,6 @@ public class EsQueryExec extends LeafExec implements EstimatesRowSize { static final EsField DOC_ID_FIELD = new EsField("_doc", DataType.DOC_DATA_TYPE, Map.of(), false); static final EsField TSID_FIELD = new EsField("_tsid", DataType.TSID_DATA_TYPE, Map.of(), true); static final EsField TIMESTAMP_FIELD = new EsField("@timestamp", DataType.DATETIME, Map.of(), true); - static final EsField INTERVAL_FIELD = new EsField("@timestamp_interval", DataType.DATETIME, Map.of(), true); private final EsIndex index; private final IndexMode indexMode; @@ -86,8 +85,7 @@ private static List sourceAttributes(Source source, IndexMode indexMo case TIME_SERIES -> List.of( new FieldAttribute(source, DOC_ID_FIELD.getName(), DOC_ID_FIELD), new FieldAttribute(source, TSID_FIELD.getName(), TSID_FIELD), - new FieldAttribute(source, TIMESTAMP_FIELD.getName(), TIMESTAMP_FIELD), - new FieldAttribute(source, INTERVAL_FIELD.getName(), INTERVAL_FIELD) + new FieldAttribute(source, TIMESTAMP_FIELD.getName(), 
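Editor's note: the hand-rolled writeVararg/readVarag pair removed above is superseded by per-class NamedWriteable serialization, registered in bulk through EsqlScalarFunction.getNamedWriteables(). The following is a minimal sketch of what that per-class logic looks like for one vararg function, mirroring the removed stream calls; the class name CoalesceSerialization and the exact import paths are illustrative assumptions, not code from this PR.

    import java.io.IOException;
    import java.util.List;

    import org.elasticsearch.xpack.esql.core.expression.Expression;
    import org.elasticsearch.xpack.esql.core.tree.Source;
    import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
    import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput;

    // Sketch only: in the PR this logic lives inside each function class (e.g. Coalesce),
    // which is what lets PlanNamedTypes drop VARARG_CTORS and the shared vararg codec.
    final class CoalesceSerialization {
        static void writeTo(PlanStreamOutput out, Coalesce coalesce) throws IOException {
            coalesce.source().writeTo(out);
            out.writeExpression(coalesce.children().get(0));             // first argument
            out.writeCollection(                                         // remaining varargs
                coalesce.children().subList(1, coalesce.children().size()),
                (o, v) -> ((PlanStreamOutput) o).writeExpression(v)
            );
        }

        static Coalesce readFrom(PlanStreamInput in) throws IOException {
            Source source = Source.readFrom(in);
            Expression first = in.readExpression();
            List<Expression> rest = in.readCollectionAsList(i -> ((PlanStreamInput) i).readExpression());
            return new Coalesce(source, first, rest);
        }
    }

The upshot is that each function owns its wire format, and the central registry only has to aggregate NamedWriteableRegistry.Entry values.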
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
index fdba785f668d7..825057c20d0e0 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
@@ -32,7 +32,6 @@
 import org.elasticsearch.compute.operator.Operator;
 import org.elasticsearch.compute.operator.OrdinalsGroupingOperator;
 import org.elasticsearch.compute.operator.SourceOperator;
-import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.IndexMode;
 import org.elasticsearch.index.mapper.BlockLoader;
 import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
@@ -184,7 +183,6 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec,
             limit,
             context.pageSize(rowEstimatedSize),
             context.queryPragmas().taskConcurrency(),
-            TimeValue.ZERO,
             shardContexts,
             querySupplier(esQueryExec.query())
         );
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
index 27aa985efd6d0..fd161c8d63871 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
@@ -122,29 +122,29 @@
 /**
  * CSV-based unit testing.
- *
+ * <p>
  * Queries and their result live *.csv-spec files. The results used in these files were manually added by running the same query on a real (debug mode) ES node. CsvTestsDataLoader loads
  * the test data helping to get the said results.
- *
+ * <p>
  * CsvTestsDataLoader creates an index using the mapping in mapping-default.json. The same mapping file is also used to create the
  * IndexResolver that helps validate the correctness of the query and the supported field data types.
  * The created index and this class uses the data from employees.csv file as data. This class is creating one Page with Blocks in it using
  * this file and the type of blocks matches the type of the schema specified on the first line of the csv file. These being said, the
  * mapping in mapping-default.csv and employees.csv should be more or less in sync. An exception to this rule:
- *
+ * <p>
  * languages:integer,languages.long:long. The mapping has "long" as a sub-field of "languages". ES knows what to do with sub-field, but
  * employees.csv is specifically defining "languages.long" as "long" and also has duplicated columns for these two.
- *
+ * <p>
  * ATM the first line from employees.csv file is not synchronized with the mapping itself.
- *
+ * <p>
  * When we add support for more field types, CsvTests should change to support the new Block types. Same goes for employees.csv file
  * (the schema needs adjustment) and the mapping-default.json file (to add or change an existing field).
  * When we add more operators, optimization rules to the logical or physical plan optimizers, there may be the need to change the operators
  * in TestPhysicalOperationProviders or adjust TestPhysicalPlanOptimizer. For example, the TestPhysicalPlanOptimizer is skipping any
  * rules that push operations to ES itself (a Limit for example). The TestPhysicalOperationProviders is a bit more complicated than that:
  * it’s creating its own Source physical operator, aggregation operator (just a tiny bit of it) and field extract operator.
- *
+ * <p>
  * To log the results logResults() should return "true".
  */
 // @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "debug")
@@ -229,7 +229,10 @@ public final void test() throws Throwable {
         assumeFalse("metadata fields aren't supported", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METADATA_FIELDS)));
         assumeFalse("enrich can't load fields in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.ENRICH_LOAD)));
         assumeFalse("can't load metrics in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METRICS_SYNTAX)));
-        assumeFalse("multiple indices aren't supported", testCase.requiredCapabilities.contains(EsqlCapabilities.UNION_TYPES));
+        assumeFalse(
+            "multiple indices aren't supported",
+            testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.UNION_TYPES.capabilityName())
+        );

         if (Build.current().isSnapshot()) {
             assertThat(
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
index 9b33af9f0a2e0..33f9cb3123b8d 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java
@@ -78,6 +78,10 @@ protected boolean alwaysEmptySource() {

     protected abstract List<NamedWriteableRegistry.Entry> getNamedWriteables();

+    public EsqlConfiguration configuration() {
+        return config;
+    }
+
     @Override
     protected final NamedWriteableRegistry getNamedWriteableRegistry() {
         List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(NamedExpression.getNamedWriteables());
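Editor's note: every serialization test added below ultimately asserts a registry round-trip. A minimal sketch of that mechanism, using stock Elasticsearch stream classes; the helper name roundTrip is mine, and the real harness additionally routes through plan streams so that the EsqlConfiguration exposed by the configuration() accessor above is available while reading.

    import java.io.IOException;

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
    import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.xpack.esql.core.expression.Expression;

    // Illustrative helper (wrap in a test class to run): write an Expression as a
    // NamedWriteable, read it back through a registry, and expect an equal instance.
    static Expression roundTrip(Expression original, NamedWriteableRegistry registry) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeNamedWriteable(original);
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                return in.readNamedWriteable(Expression.class); // should equal `original`
            }
        }
    }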
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractVarargsSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractVarargsSerializationTests.java
new file mode 100644
index 0000000000000..67195fa99114b
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractVarargsSerializationTests.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+
+import java.io.IOException;
+import java.util.List;
+
+public abstract class AbstractVarargsSerializationTests<T extends Expression> extends AbstractExpressionSerializationTests<T> {
+    protected abstract T create(Source source, Expression first, List<Expression> rest);
+
+    @Override
+    protected final T createTestInstance() {
+        Source source = randomSource();
+        Expression first = randomChild();
+        List<Expression> rest = randomList(0, 10, AbstractExpressionSerializationTests::randomChild);
+        return create(source, first, rest);
+    }
+
+    @Override
+    protected final T mutateInstance(T instance) throws IOException {
+        Source source = instance.source();
+        Expression first = instance.children().get(0);
+        List<Expression> rest = instance.children().subList(1, instance.children().size());
+        if (randomBoolean()) {
+            first = randomValueOtherThan(first, AbstractExpressionSerializationTests::randomChild);
+        } else {
+            rest = randomValueOtherThan(rest, () -> randomList(0, 10, AbstractExpressionSerializationTests::randomChild));
+        }
+        return create(instance.source(), first, rest);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
index f27438de6df6b..249d4f7349517 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
@@ -161,6 +161,24 @@ protected static Iterable<Object[]> parameterSuppliersFromTypedData(List<TestCas
+    /**
+     * Converts a list of test cases into a list of parameter suppliers.
+     * Also, adds a default set of extra test cases.
+     * <p>
+     * Use if possible, as this method may get updated with new checks in the future.
+     * </p>
+     *
+     * @param entirelyNullPreservesType See {@link #anyNullIsNull(boolean, List)}
+     */
+    protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecks(
+        boolean entirelyNullPreservesType,
+        List<TestCaseSupplier> suppliers
+    ) {
+        return parameterSuppliersFromTypedData(
+            errorsForCasesWithoutExamples(anyNullIsNull(entirelyNullPreservesType, randomizeBytesRefsOffset(suppliers)))
+        );
+    }
+
     /**
      * Build an {@link Attribute} that loads a field.
      */
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/NotSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/NotSerializationTests.java
new file mode 100644
index 0000000000000..61e3690f1633f
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/NotSerializationTests.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+import java.util.List;
+
+public class NotSerializationTests extends AbstractExpressionSerializationTests<Not> {
+    @Override
+    protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return UnaryScalarFunction.getNamedWriteables();
+    }
+
+    @Override
+    protected Not createTestInstance() {
+        return new Not(randomSource(), randomChild());
+    }
+
+    @Override
+    protected Not mutateInstance(Not instance) throws IOException {
+        Source source = instance.source();
+        Expression child = randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild);
+        return new Not(source, child);
+    }
+}
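Editor's note: the mechanical churn in the function tests below is the single substitution sketched here, taken directly from this diff. Note that the helper also folds in randomizeBytesRefsOffset(suppliers), which a few suites (e.g. IpPrefixTests, further down) previously applied by hand and most omitted.

    // Before, spelled out at every call site:
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
    // After, centralized so new default checks land everywhere at once:
    return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);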
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseSerializationTests.java
new file mode 100644
index 0000000000000..69bbf2f76937f
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseSerializationTests.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.conditional;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests;
+import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+
+import java.util.List;
+
+public class CaseSerializationTests extends AbstractVarargsSerializationTests<Case> {
+    @Override
+    protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return EsqlScalarFunction.getNamedWriteables();
+    }
+
+    @Override
+    protected Case create(Source source, Expression first, List<Expression> rest) {
+        return new Case(source, first, rest);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestSerializationTests.java
new file mode 100644
index 0000000000000..43e1fe405911a
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestSerializationTests.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.conditional;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests;
+import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+
+import java.util.List;
+
+public class GreatestSerializationTests extends AbstractVarargsSerializationTests<Greatest> {
+    @Override
+    protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return EsqlScalarFunction.getNamedWriteables();
+    }
+
+    @Override
+    protected Greatest create(Source source, Expression first, List<Expression> rest) {
+        return new Greatest(source, first, rest);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastSerializationTests.java
new file mode 100644
index 0000000000000..f552713af4dbe
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastSerializationTests.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.conditional;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests;
+import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+
+import java.util.List;
+
+public class LeastSerializationTests extends AbstractVarargsSerializationTests<Least> {
+    @Override
+    protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return EsqlScalarFunction.getNamedWriteables();
+    }
+
+    @Override
+    protected Least create(Source source, Expression first, List<Expression> rest) {
+        return new Least(source, first, rest);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java
index 2096d9cec75b1..d97f070275617 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java
@@ -55,7 +55,7 @@ public static Iterable<Object[]> parameters() {
             );
         }));

-        return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
+        return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);
     }

     @Override
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java
index dd35e04708c9f..4c9175e4906bf 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java
@@ -55,7 +55,7 @@ public static Iterable<Object[]> parameters() {
             );
         }));

-        return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
+        return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);
     }

     @Override
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java
index 3a25ad6b56d0c..c4e53d922ac60 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java
@@ -80,7 +80,7 @@ public static Iterable<Object[]> parameters() {
             emptyList()
         );

-        return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
+        return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers);
     }

     @Override
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java
index b520e559c45d7..1c1431fe3b7ea 100644
---
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java @@ -72,7 +72,7 @@ public static Iterable parameters() { ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java index 9eb1155a209a1..48a610804845d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java @@ -73,7 +73,7 @@ public static Iterable parameters() { ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java index 0ef931710422e..6aef91be43088 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java @@ -162,7 +162,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static String randomDateString(long from, long to) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java index b7cb03879fd6f..fc45c8b26a869 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java @@ -89,7 +89,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java index 6438a8422a664..5f45cc11d9c5a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java @@ -139,7 +139,7 @@ public static Iterable 
parameters() { List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java index e1af4441b3c5f..2b5dc453acc23 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java @@ -66,7 +66,7 @@ public static Iterable parameters() { ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java index 291708e94888c..bca8dc822052f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java @@ -66,7 +66,7 @@ public static Iterable parameters() { List.of() ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java index 415d9ea0a4a70..20b48d24f8211 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java @@ -63,7 +63,7 @@ public static Iterable parameters() { ); // add null as parameter - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java index 83bdaf2f2d304..45837c2110ff3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java @@ -268,7 +268,7 @@ public static Iterable parameters() { List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java index 92b0bb192e2aa..565562b8574d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java @@ -227,7 +227,7 @@ public static Iterable parameters() { l -> ((Integer) l).longValue(), List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java index 67951b46d03b5..3f6e28c65142f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java @@ -70,7 +70,7 @@ public static Iterable parameters() { List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java index 511df557ff842..0556742b55b3c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java @@ -130,7 +130,7 @@ public static Iterable parameters() { v -> new BytesRef(v.toString()), List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java index 4182f99d316fc..44092db499d2d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java @@ -244,7 +244,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java index a397de64aeea8..34281442872a5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java @@ -60,7 +60,7 @@ public static Iterable parameters() { ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffSerializationTests.java new file mode 100644 index 0000000000000..b1dc1b064ae5a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffSerializationTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class DateDiffSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected DateDiff createTestInstance() { + Source source = randomSource(); + Expression unit = randomChild(); + Expression startTimestamp = randomChild(); + Expression endTimestamp = randomChild(); + return new DateDiff(source, unit, startTimestamp, endTimestamp); + } + + @Override + protected DateDiff mutateInstance(DateDiff instance) throws IOException { + Source source = instance.source(); + Expression unit = instance.unit(); + Expression startTimestamp = instance.startTimestamp(); + Expression endTimestamp = instance.endTimestamp(); + switch (between(0, 2)) { + case 0 -> unit = randomValueOtherThan(unit, AbstractExpressionSerializationTests::randomChild); + case 1 -> startTimestamp = randomValueOtherThan(startTimestamp, AbstractExpressionSerializationTests::randomChild); + case 2 -> endTimestamp = randomValueOtherThan(endTimestamp, AbstractExpressionSerializationTests::randomChild); + } + return new DateDiff(source, unit, startTimestamp, endTimestamp); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractSerializationTests.java new file mode 100644 index 0000000000000..6e1c061c84f2e --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractSerializationTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class DateExtractSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected DateExtract createTestInstance() { + Source source = randomSource(); + Expression datePart = randomChild(); + Expression field = randomChild(); + return new DateExtract(source, datePart, field, configuration()); + } + + @Override + protected DateExtract mutateInstance(DateExtract instance) throws IOException { + Source source = instance.source(); + Expression datePart = instance.datePart(); + Expression field = instance.field(); + if (randomBoolean()) { + datePart = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + } else { + field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + } + return new DateExtract(source, datePart, field, configuration()); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java index 221f3fd51a545..bce3b7efebbb6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java @@ -39,54 +39,50 @@ public DateExtractTests(@Name("TestCase") Supplier te @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples( - anyNullIsNull( - true, - List.of( - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("YeAr"), DataType.KEYWORD, "chrono"), - new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "date") - ), - "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", - DataType.LONG, - equalTo(2023L) - ) + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + List.of( + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("YeAr"), DataType.KEYWORD, "chrono"), + new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "date") ), - new TestCaseSupplier( - List.of(DataType.TEXT, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("YeAr"), DataType.TEXT, "chrono"), - new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "date") - ), - "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", - DataType.LONG, - equalTo(2023L) - ) + 
"DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", + DataType.LONG, + equalTo(2023L) + ) + ), + new TestCaseSupplier( + List.of(DataType.TEXT, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("YeAr"), DataType.TEXT, "chrono"), + new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "date") ), - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("not a unit"), DataType.KEYWORD, "chrono"), - new TestCaseSupplier.TypedData(0L, DataType.DATETIME, "date") + "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", + DataType.LONG, + equalTo(2023L) + ) + ), + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("not a unit"), DataType.KEYWORD, "chrono"), + new TestCaseSupplier.TypedData(0L, DataType.DATETIME, "date") - ), - "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", - DataType.LONG, - is(nullValue()) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning( - "Line -1:-1: java.lang.IllegalArgumentException: " - + "No enum constant java.time.temporal.ChronoField.NOT A UNIT" - ) - .withFoldingException(InvalidArgumentException.class, "invalid date field for []: not a unit") + ), + "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", + DataType.LONG, + is(nullValue()) + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") + .withWarning( + "Line -1:-1: java.lang.IllegalArgumentException: " + + "No enum constant java.time.temporal.ChronoField.NOT A UNIT" ) - ) + .withFoldingException(InvalidArgumentException.class, "invalid date field for []: not a unit") ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatSerializationTests.java new file mode 100644 index 0000000000000..4dff735318558 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatSerializationTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class DateFormatSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected DateFormat createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression format = randomBoolean() ? null : randomChild(); + return new DateFormat(source, field, format, configuration()); + } + + @Override + protected DateFormat mutateInstance(DateFormat instance) throws IOException { + Source source = instance.source(); + Expression field = instance.field(); + Expression format = instance.format(); + if (randomBoolean()) { + field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + } else { + format = randomValueOtherThan(format, () -> randomBoolean() ? null : randomChild()); + } + return new DateFormat(source, field, format, configuration()); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java index 6e1b5caa710e1..b18748187709a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java @@ -31,35 +31,31 @@ public DateFormatTests(@Name("TestCase") Supplier tes @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples( - anyNullIsNull( - true, - List.of( - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy"), DataType.KEYWORD, "formatter"), - new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "val") - ), - "DateFormatEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], locale=en_US]", - DataType.KEYWORD, - equalTo(BytesRefs.toBytesRef("2023")) - ) + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + List.of( + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy"), DataType.KEYWORD, "formatter"), + new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "val") ), - new TestCaseSupplier( - List.of(DataType.TEXT, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy"), DataType.TEXT, "formatter"), - new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "val") - ), - "DateFormatEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], locale=en_US]", - DataType.KEYWORD, - equalTo(BytesRefs.toBytesRef("2023")) - ) - ) + "DateFormatEvaluator[val=Attribute[channel=1], 
formatter=Attribute[channel=0], locale=en_US]", + DataType.KEYWORD, + equalTo(BytesRefs.toBytesRef("2023")) + ) + ), + new TestCaseSupplier( + List.of(DataType.TEXT, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy"), DataType.TEXT, "formatter"), + new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "val") + ), + "DateFormatEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], locale=en_US]", + DataType.KEYWORD, + equalTo(BytesRefs.toBytesRef("2023")) ) ) ) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseSerializationTests.java new file mode 100644 index 0000000000000..e816f2c4a20fb --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseSerializationTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class DateParseSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected DateParse createTestInstance() { + Source source = randomSource(); + Expression first = randomChild(); + Expression second = randomBoolean() ? null : randomChild(); + return new DateParse(source, first, second); + } + + @Override + protected DateParse mutateInstance(DateParse instance) throws IOException { + Source source = instance.source(); + Expression first = instance.children().get(0); + Expression second = instance.children().size() == 1 ? null : instance.children().get(1); + if (randomBoolean()) { + first = randomValueOtherThan(first, AbstractExpressionSerializationTests::randomChild); + } else { + second = randomValueOtherThan(second, () -> randomBoolean() ? 
null : randomChild()); + } + return new DateParse(source, first, second); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java index 8906994c6d7eb..161b338cc85b2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java @@ -35,102 +35,97 @@ public DateParseTests(@Name("TestCase") Supplier test @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples( - anyNullIsNull( - true, - List.of( - new TestCaseSupplier( - "Basic Case", - List.of(DataType.KEYWORD, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - equalTo(1683244800000L) - ) + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + List.of( + new TestCaseSupplier( + "Basic Case", + List.of(DataType.KEYWORD, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") ), - new TestCaseSupplier( - "With Text", - List.of(DataType.KEYWORD, DataType.TEXT), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - equalTo(1683244800000L) - ) + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + equalTo(1683244800000L) + ) + ), + new TestCaseSupplier( + "With Text", + List.of(DataType.KEYWORD, DataType.TEXT), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") ), - new TestCaseSupplier( - "With Both Text", - List.of(DataType.TEXT, DataType.TEXT), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - equalTo(1683244800000L) - ) + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + equalTo(1683244800000L) + ) + ), + new TestCaseSupplier( + "With Both Text", + List.of(DataType.TEXT, DataType.TEXT), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") ), - new 
TestCaseSupplier( - "With keyword", - List.of(DataType.TEXT, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - equalTo(1683244800000L) - ) + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + equalTo(1683244800000L) + ) + ), + new TestCaseSupplier( + "With keyword", + List.of(DataType.TEXT, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") ), - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("not a format"), DataType.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + equalTo(1683244800000L) + ) + ), + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("not a format"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - is(nullValue()) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning( - "Line -1:-1: java.lang.IllegalArgumentException: Invalid format: " - + "[not a format]: Unknown pattern letter: o" - ) - .withFoldingException( - InvalidArgumentException.class, - "invalid date pattern for []: Invalid format: [not a format]: Unknown pattern letter: o" - ) ), - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("not a date"), DataType.KEYWORD, "second") + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + is(nullValue()) + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.") + .withWarning( + "Line -1:-1: java.lang.IllegalArgumentException: Invalid format: " + "[not a format]: Unknown pattern letter: o" + ) + .withFoldingException( + InvalidArgumentException.class, + "invalid date pattern for []: Invalid format: [not a format]: Unknown pattern letter: o" + ) + ), + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("not a date"), DataType.KEYWORD, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - is(nullValue()) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning( - "Line -1:-1: java.lang.IllegalArgumentException: " - + "failed to parse date field [not a date] with format [yyyy-MM-dd]" - ) + ), + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + is(nullValue()) + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") + .withWarning( + "Line -1:-1: java.lang.IllegalArgumentException: " + + "failed to parse date field [not a date] with format [yyyy-MM-dd]" ) - ) ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncSerializationTests.java new file mode 100644 index 0000000000000..09d2e06003128 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncSerializationTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class DateTruncSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected DateTrunc createTestInstance() { + Source source = randomSource(); + Expression interval = randomChild(); + Expression field = randomChild(); + return new DateTrunc(source, interval, field); + } + + @Override + protected DateTrunc mutateInstance(DateTrunc instance) throws IOException { + Source source = instance.source(); + Expression interval = instance.interval(); + Expression field = instance.field(); + if (randomBoolean()) { + interval = randomValueOtherThan(interval, AbstractExpressionSerializationTests::randomChild); + } else { + field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + } + return new DateTrunc(source, interval, field); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java index b627d7cd88908..4c5a7d3734ce3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java @@ -54,7 +54,7 @@ public static Iterable parameters() { ofDuration(Duration.ofSeconds(30), ts, "2023-02-17T10:25:30.00Z"), randomSecond() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } public void testCreateRoundingDuration() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowSerializationTests.java new file mode 100644 index 0000000000000..3bb8c2f260561 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class NowSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected Now createTestInstance() { + return new Now(randomSource(), configuration()); + } + + @Override + protected Now mutateInstance(Now instance) throws IOException { + return null; + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/NowTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowTests.java similarity index 97% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/NowTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowTests.java index 2c1322abf8cda..8edc21db427d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/NowTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.expression.function.scalar.math; +package org.elasticsearch.xpack.esql.expression.function.scalar.date; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.Now; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.hamcrest.Matcher; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java index 2aaca179b2bc4..0d8f4bc7ea115 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java @@ -84,7 +84,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java index d2b5e0a455229..a575eb48c4bd7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java +++ 
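
Note on the change that repeats throughout these test files: the hand-written chain parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(...))) is folded into a single parameterSuppliersFromTypedDataWithDefaultChecks(...) entry point. A minimal sketch of what that helper plausibly does, inferred from the call sites in this diff (the IpPrefixTests hunk that follows also drops its explicit randomizeBytesRefsOffset call, suggesting the wrapper applies it by default) — the exact signature is an assumption, not quoted from the PR:

protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecks(
    boolean entirelyNullPreservesType,
    List<TestCaseSupplier> suppliers
) {
    // Apply the default decorations every call site previously spelled out by hand:
    // null-argument cases, randomized BytesRef offsets, and synthesized type-error cases.
    return parameterSuppliersFromTypedData(
        errorsForCasesWithoutExamples(anyNullIsNull(entirelyNullPreservesType, randomizeBytesRefsOffset(suppliers)))
    );
}
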
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java @@ -106,7 +106,7 @@ public static Iterable parameters() { }) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, randomizeBytesRefsOffset(suppliers)))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java index 63642a01fa117..7bd195ab86389 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java @@ -63,7 +63,7 @@ public static Iterable parameters() { equalTo(Math.abs(arg)) ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } public AbsTests(@Name("TestCase") Supplier testCaseSupplier) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java index 2266494391262..3b81316da5676 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java @@ -36,7 +36,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java index 8c7000940390b..c92c626a5601b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java index 735113c34ca1b..ff61ecfa39687 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java @@ -66,7 +66,7 @@ public static Iterable parameters() { UNSIGNED_LONG_MAX, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return 
parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java index 981c6812d1176..61e7a1f051905 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java index cb666f03494e5..1ea63cc006e9c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java @@ -61,7 +61,7 @@ public static Iterable parameters() { ) ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java index 62c23369cc436..f0c990ec64af1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java @@ -50,7 +50,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java index ce23598bf980d..0d9bd6bcae64a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java index 5d349e09aed2e..8f78e8ee67106 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java @@ -61,7 +61,7 @@ public static Iterable parameters() { ) ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java index c138fc12881fd..86c59a7a06cf4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java index 585e75d05e378..1f4fef4ab15c8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java index 966a5a590e256..43c683040eac4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java @@ -55,7 +55,7 @@ public static Iterable parameters() { */ (size, data) -> avg.apply(size, data.mapToDouble(v -> unsignedLongToDouble(NumericUtils.asLongUnsigned(v)))) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java index 39ef5eefe9287..ba4ddb1be84cc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java @@ -68,7 +68,7 @@ public static Iterable parameters() { 
} } } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java index 8733dc0d25c40..8c8772f8ed4e2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java @@ -40,7 +40,7 @@ public static Iterable parameters() { cartesianPoints(cases, "mv_count", "MvCount", DataType.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); geoShape(cases, "mv_count", "MvCount", DataType.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); cartesianShape(cases, "mv_count", "MvCount", DataType.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java index 1c24b1a8aae64..6e143d9175f41 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java @@ -41,7 +41,7 @@ public static Iterable parameters() { cartesianPoints(cases, "mv_first", "MvFirst", DataType.CARTESIAN_POINT, (size, values) -> equalTo(values.findFirst().get())); geoShape(cases, "mv_first", "MvFirst", DataType.GEO_SHAPE, (size, values) -> equalTo(values.findFirst().get())); cartesianShape(cases, "mv_first", "MvFirst", DataType.CARTESIAN_SHAPE, (size, values) -> equalTo(values.findFirst().get())); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java index 1b6fb482ea3d0..83d94f2cc9884 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java @@ -41,7 +41,7 @@ public static Iterable parameters() { cartesianPoints(cases, "mv_last", "MvLast", DataType.CARTESIAN_POINT, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); geoShape(cases, "mv_last", "MvLast", DataType.GEO_SHAPE, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); cartesianShape(cases, "mv_last", "MvLast", DataType.CARTESIAN_SHAPE, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); - return 
parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java index 5af662c2642cc..63530234e53fa 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java @@ -38,7 +38,7 @@ public static Iterable parameters() { longs(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.max().getAsLong())); unsignedLongs(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.reduce(BigInteger::max).get())); dateTimes(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.max().getAsLong())); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java index 4c324c916f861..f44f5d44e3f62 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java @@ -92,7 +92,7 @@ public static Iterable parameters() { ) ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java index 6f398c8a7ac92..5be67548f784e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java @@ -38,7 +38,7 @@ public static Iterable parameters() { longs(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.min().getAsLong())); unsignedLongs(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.reduce(BigInteger::min).get())); dateTimes(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.min().getAsLong())); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceSerializationTests.java new file mode 100644 index 0000000000000..7cab0a957b235 --- /dev/null +++ 
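
CoalesceSerializationTests below (and ConcatSerializationTests later in this diff) implement nothing but a create(...) factory, so the shared varargs base class must own instance creation and mutation. A sketch of the contract this implies, with the randomList bounds assumed rather than taken from the real AbstractVarargsSerializationTests:

public abstract class AbstractVarargsSerializationTests<T extends Expression> extends AbstractExpressionSerializationTests<T> {
    // Subclasses map (source, first, rest) onto their concrete constructor.
    protected abstract T create(Source source, Expression first, List<Expression> rest);

    @Override
    protected final T createTestInstance() {
        // A random head argument plus a random-length tail mirrors the varargs constructor shape.
        return create(randomSource(), randomChild(), randomList(0, 10, AbstractExpressionSerializationTests::randomChild));
    }
}
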
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceSerializationTests.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.nulls;
+
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests;
+import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
+
+import java.util.List;
+
+public class CoalesceSerializationTests extends AbstractVarargsSerializationTests<Coalesce> {
+    @Override
+    protected List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return EsqlScalarFunction.getNamedWriteables();
+    }
+
+    @Override
+    protected Coalesce create(Source source, Expression first, List<Expression> rest) {
+        return new Coalesce(source, first, rest);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullSerializationTests.java
new file mode 100644
index 0000000000000..23545b3627a1a
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullSerializationTests.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.nulls; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class IsNotNullSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return UnaryScalarFunction.getNamedWriteables(); + } + + @Override + protected IsNotNull createTestInstance() { + return new IsNotNull(randomSource(), randomChild()); + } + + @Override + protected IsNotNull mutateInstance(IsNotNull instance) throws IOException { + Source source = instance.source(); + Expression child = randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild); + return new IsNotNull(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullSerializationTests.java new file mode 100644 index 0000000000000..354a2129d7ec0 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.nulls; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class IsNullSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return UnaryScalarFunction.getNamedWriteables(); + } + + @Override + protected IsNull createTestInstance() { + return new IsNull(randomSource(), randomChild()); + } + + @Override + protected IsNull mutateInstance(IsNull instance) throws IOException { + Source source = instance.source(); + Expression child = randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild); + return new IsNull(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java index b466ffe1e92f1..fa0fc8465ce7a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java @@ -36,7 +36,7 @@ public static Iterable parameters() { final List suppliers = new ArrayList<>(); TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedEvaluator, DOUBLE, StXTests::valueOf, List.of()); TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedEvaluator, DOUBLE, StXTests::valueOf, List.of()); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static double valueOf(BytesRef wkb) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java index 1f3639bf1ecb4..15f34271be779 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java @@ -36,7 +36,7 @@ public static Iterable parameters() { final List suppliers = new ArrayList<>(); TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedEvaluator, DOUBLE, StYTests::valueOf, List.of()); TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedEvaluator, DOUBLE, StYTests::valueOf, List.of()); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static double valueOf(BytesRef wkb) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java index f44a51b0e53bb..27e3fc8684efc 100644 
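
Every mutateInstance(...) override in these new serialization tests leans on randomValueOtherThan to guarantee the mutated copy differs from the original, which is what the equals/hashCode checks in wire round-trip testing require. Roughly, the existing ESTestCase helper behaves like this (a paraphrase of its contract, not the verbatim implementation):

public static <T> T randomValueOtherThan(T input, java.util.function.Supplier<T> randomSupplier) {
    T value;
    do {
        value = randomSupplier.get();                 // keep drawing random values...
    } while (java.util.Objects.equals(input, value)); // ...until one differs from the input
    return value;
}
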
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java @@ -67,7 +67,7 @@ static Iterable parameters(String name, boolean trimLeading, boolean t })); } } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } private static TestCaseSupplier.TestCase testCase(String name, DataType type, String data, String expected) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatSerializationTests.java new file mode 100644 index 0000000000000..30f6acffbaf8a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatSerializationTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.util.List; + +public class ConcatSerializationTests extends AbstractVarargsSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected Concat create(Source source, Expression first, List rest) { + return new Concat(source, first, rest); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java index e6a5d30d0fa53..7d6e3439c8063 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java @@ -167,7 +167,7 @@ public static Iterable parameters() { ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static String unicodeLeftSubstring(String str, int length) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java index 81fcc118ade05..4a7e6b3a0996d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java @@ -49,7 +49,7 @@ public static 
Iterable parameters() { cases.addAll(makeTestCases("6 bytes, 2 code points", () -> "❗️", 2)); cases.addAll(makeTestCases("100 random alpha", () -> randomAlphaOfLength(100), 100)); cases.addAll(makeTestCases("100 random code points", () -> randomUnicodeOfCodepointLength(100), 100)); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases); } private static List makeTestCases(String title, Supplier text, int expectedLength) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java index bfadf66f7f5cc..82581b69f8713 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java @@ -103,7 +103,7 @@ public static Iterable parameters() { "Unclosed character class near index 0\n[\n^".replaceAll("\n", System.lineSeparator()) ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } private static TestCaseSupplier fixedCase(String name, String str, String oldStr, String newStr, String result) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java index 599ab51995217..9d2b55e02fff7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java @@ -166,7 +166,7 @@ public static Iterable parameters() { equalTo(new BytesRef(unicodeRightSubstring(text, length))) ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static String unicodeRightSubstring(String str, int length) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java index 47e48df90007e..bf2dd0359a352 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java @@ -64,7 +64,7 @@ public static Iterable parameters() { })); } } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java index c1a49455d9d83..0ee60cfc77d2f 100644 --- 
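
The SubstringTests expectations in the next hunk encode ESQL's 1-based SUBSTRING semantics by shifting into Java's 0-based, end-exclusive String.substring. A self-contained illustration of that index arithmetic:

public class SubstringSemanticsDemo {
    public static void main(String[] args) {
        String text = "abcdefghij";
        int start = 3;  // 1-based start position, as an ESQL user writes it
        int length = 4; // number of characters to take
        // The -1 shifts convert to Java's 0-based, end-exclusive indexing.
        String result = text.substring(start - 1, start + length - 1);
        System.out.println(result); // prints "cdef"
    }
}
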
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java @@ -36,61 +36,56 @@ public SubstringTests(@Name("TestCase") Supplier test @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples( - anyNullIsNull( - true, + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + List.of(new TestCaseSupplier("Substring basic test", List.of(DataType.KEYWORD, DataType.INTEGER, DataType.INTEGER), () -> { + int start = between(1, 8); + int length = between(1, 10 - start); + String text = randomAlphaOfLength(10); + return new TestCaseSupplier.TestCase( List.of( - new TestCaseSupplier("Substring basic test", List.of(DataType.KEYWORD, DataType.INTEGER, DataType.INTEGER), () -> { - int start = between(1, 8); - int length = between(1, 10 - start); - String text = randomAlphaOfLength(10); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"), - new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), - new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") - ), - "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", - DataType.KEYWORD, - equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) - ); - }), - new TestCaseSupplier( - "Substring basic test with text input", - List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), - () -> { - int start = between(1, 8); - int length = between(1, 10 - start); - String text = randomAlphaOfLength(10); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef(text), DataType.TEXT, "str"), - new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), - new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") - ), - "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", - DataType.KEYWORD, - equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) - ); - } + new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") + ), + "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", + DataType.KEYWORD, + equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) + ); + }), + new TestCaseSupplier( + "Substring basic test with text input", + List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), + () -> { + int start = between(1, 8); + int length = between(1, 10 - start); + String text = randomAlphaOfLength(10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataType.TEXT, "str"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") + ), + "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", + DataType.KEYWORD, + equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) + ); + } + ), + new TestCaseSupplier("Substring empty string", List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), () -> { + int start = between(1, 8); + int 
length = between(1, 10 - start); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(""), DataType.TEXT, "str"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") ), - new TestCaseSupplier("Substring empty string", List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), () -> { - int start = between(1, 8); - int length = between(1, 10 - start); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef(""), DataType.TEXT, "str"), - new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), - new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") - ), - "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", - DataType.KEYWORD, - equalTo(new BytesRef("")) - ); - }) - ) - ) + "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", + DataType.KEYWORD, + equalTo(new BytesRef("")) + ); + }) ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerSerializationTests.java new file mode 100644 index 0000000000000..f2dbdbd74470a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerSerializationTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class ToLowerSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected ToLower createTestInstance() { + return new ToLower(randomSource(), randomChild(), configuration()); + } + + @Override + protected ToLower mutateInstance(ToLower instance) throws IOException { + Source source = instance.source(); + Expression child = randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild); + return new ToLower(source, child, configuration()); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index 99b2b38aa8611..abb419e1e4a81 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } public void testRandomLocale() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperSerializationTests.java new file mode 100644 index 0000000000000..e57aedd79fdfd --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperSerializationTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; + +import java.io.IOException; +import java.util.List; + +public class ToUpperSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected List getNamedWriteables() { + return EsqlScalarFunction.getNamedWriteables(); + } + + @Override + protected ToUpper createTestInstance() { + return new ToUpper(randomSource(), randomChild(), configuration()); + } + + @Override + protected ToUpper mutateInstance(ToUpper instance) throws IOException { + Source source = instance.source(); + Expression child = randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild); + return new ToUpper(source, child, configuration()); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index 7b8e6abcdb3db..f101cacd73dc5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } public void testRandomLocale() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java index c2a9766c23cbe..a628416ecc4b7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java @@ -110,7 +110,7 @@ public static Iterable parameters() { equalTo(arg.negated()) ); }))); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index 853096626179e..5ecdb4864a9cc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; import 
org.elasticsearch.index.mapper.MapperServiceTestCase;
+import org.elasticsearch.search.internal.AliasFilter;
 import org.elasticsearch.search.internal.ContextIndexSearcher;
 import org.elasticsearch.xpack.esql.TestBlockFactory;
 import org.elasticsearch.xpack.esql.core.expression.FieldAttribute;
@@ -164,7 +165,7 @@ private EsPhysicalOperationProviders esPhysicalOperationProviders() throws IOException {
                 new EsPhysicalOperationProviders.DefaultShardContext(
                     i,
                     createSearchExecutionContext(createMapperService(mapping(b -> {})), searcher),
-                    null
+                    AliasFilter.EMPTY
                 )
             );
         }
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java
index 54b9cdca98393..93d5587153181 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java
@@ -58,7 +58,10 @@ protected Settings nodeSettings() {
     }
 
     public void testModeSnapshotRestore() throws Exception {
-        clusterAdmin().preparePutRepository("repo").setType("fs").setSettings(Settings.builder().put("location", "repo").build()).get();
+        clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo")
+            .setType("fs")
+            .setSettings(Settings.builder().put("location", "repo").build())
+            .get();
 
         client().execute(
             PutSnapshotLifecycleAction.INSTANCE,
@@ -104,7 +107,7 @@ public void testModeSnapshotRestore() throws Exception {
         try {
             GetSnapshotsResponse getResp = client().execute(
                 TransportGetSnapshotsAction.TYPE,
-                new GetSnapshotsRequest(new String[] { "repo" }, new String[] { snapshotName })
+                new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, new String[] { "repo" }, new String[] { snapshotName })
             ).get();
             assertThat(getResp.getSnapshots().size(), equalTo(1));
             assertThat(getResp.getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
@@ -121,7 +124,9 @@ public void testModeSnapshotRestore() throws Exception {
         // Restore snapshot
         client().execute(
             TransportRestoreSnapshotAction.TYPE,
-            new RestoreSnapshotRequest("repo", snapshotName).includeGlobalState(true).indices(Strings.EMPTY_ARRAY).waitForCompletion(true)
+            new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, "repo", snapshotName).includeGlobalState(true)
+                .indices(Strings.EMPTY_ARRAY)
+                .waitForCompletion(true)
         ).get();
 
         assertBusy(() -> assertThat(ilmMode(), equalTo(OperationMode.STOPPED)));
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java
index 58603526a9c56..a352116278e7a 100644
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java
@@ -30,6 +30,7 @@
 
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Consumer;
@@ -159,4 +160,5 @@ public static SimilarityMeasure randomSimilarityMeasure() {
         return randomFrom(SimilarityMeasure.values());
     }
 
+    public record PersistedConfig(Map<String, Object> config, Map<String, Object> secrets) {}
 }
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index 709cc4d3494fd..d26b02ddba62b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -60,6 +60,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -1068,28 +1069,18 @@ private Map getRequestConfigMap( ); } - private record PeristedConfigRecord(Map config, Map secrets) {} - - private PeristedConfigRecord getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings ) { - return new PeristedConfigRecord( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) ); } - private PeristedConfigRecord getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - - return new PeristedConfigRecord( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - null - ); - } - private static Map getEmbeddingsServiceSettingsMap( String target, String provider, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionServiceSettingsTests.java index d46a5f190017a..95be365706ccb 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionServiceSettingsTests.java @@ -7,15 +7,18 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.completion; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioEndpointType; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioProvider; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; import org.hamcrest.CoreMatchers; import java.io.IOException; @@ -27,7 +30,8 @@ import static org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants.TARGET_FIELD; import 
static org.hamcrest.Matchers.is; -public class AzureAiStudioChatCompletionServiceSettingsTests extends ESTestCase { +public class AzureAiStudioChatCompletionServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + AzureAiStudioChatCompletionServiceSettings> { public void testFromMap_Request_CreatesSettingsCorrectly() { var target = "http://sometarget.local"; var provider = "openai"; @@ -119,4 +123,38 @@ public void testToFilteredXContent_WritesAllValues() throws IOException { public static HashMap createRequestSettingsMap(String target, String provider, String endpointType) { return new HashMap<>(Map.of(TARGET_FIELD, target, PROVIDER_FIELD, provider, ENDPOINT_TYPE_FIELD, endpointType)); } + + @Override + protected Writeable.Reader instanceReader() { + return AzureAiStudioChatCompletionServiceSettings::new; + } + + @Override + protected AzureAiStudioChatCompletionServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AzureAiStudioChatCompletionServiceSettings mutateInstance(AzureAiStudioChatCompletionServiceSettings instance) + throws IOException { + return randomValueOtherThan(instance, AzureAiStudioChatCompletionServiceSettingsTests::createRandom); + } + + @Override + protected AzureAiStudioChatCompletionServiceSettings mutateInstanceForVersion( + AzureAiStudioChatCompletionServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + private static AzureAiStudioChatCompletionServiceSettings createRandom() { + return new AzureAiStudioChatCompletionServiceSettings( + randomAlphaOfLength(10), + randomFrom(AzureAiStudioProvider.values()), + randomFrom(AzureAiStudioEndpointType.values()), + RateLimitSettingsTests.createRandom() + ); + } + } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionTaskSettingsTests.java index bc541bbcf5369..d48068d5a4008 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionTaskSettingsTests.java @@ -7,13 +7,15 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.completion; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.hamcrest.MatcherAssert; import java.io.IOException; @@ -27,7 +29,8 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -public class AzureAiStudioChatCompletionTaskSettingsTests extends ESTestCase { +public class AzureAiStudioChatCompletionTaskSettingsTests extends AbstractBWCWireSerializationTestCase< + AzureAiStudioChatCompletionTaskSettings> { public void testFromMap_AllValues() { var taskMap = getTaskSettingsMap(1.0, 2.0, true, 512); @@ -183,4 +186,36 @@ 
public static Map getTaskSettingsMap( return map; } + + @Override + protected Writeable.Reader instanceReader() { + return AzureAiStudioChatCompletionTaskSettings::new; + } + + @Override + protected AzureAiStudioChatCompletionTaskSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AzureAiStudioChatCompletionTaskSettings mutateInstance(AzureAiStudioChatCompletionTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, AzureAiStudioChatCompletionTaskSettingsTests::createRandom); + } + + @Override + protected AzureAiStudioChatCompletionTaskSettings mutateInstanceForVersion( + AzureAiStudioChatCompletionTaskSettings instance, + TransportVersion version + ) { + return instance; + } + + private static AzureAiStudioChatCompletionTaskSettings createRandom() { + return new AzureAiStudioChatCompletionTaskSettings( + randomFrom(new Double[] { null, randomDouble() }), + randomFrom(new Double[] { null, randomDouble() }), + randomFrom(randomFrom(new Boolean[] { null, randomBoolean() })), + randomFrom(new Integer[] { null, randomNonNegativeInt() }) + ); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java index a592dd6e1f956..05388192b2f14 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java @@ -7,20 +7,23 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.embeddings; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.SimilarityMeasure; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioEndpointType; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioProvider; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; import org.hamcrest.CoreMatchers; import org.hamcrest.MatcherAssert; @@ -32,7 +35,8 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -public class AzureAiStudioEmbeddingsServiceSettingsTests extends ESTestCase { +public class AzureAiStudioEmbeddingsServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + AzureAiStudioEmbeddingsServiceSettings> { public void testFromMap_Request_CreatesSettingsCorrectly() { var target = "http://sometarget.local"; @@ -336,4 
+340,40 @@ public static HashMap createRequestSettingsMap( return map; } + + @Override + protected Writeable.Reader instanceReader() { + return AzureAiStudioEmbeddingsServiceSettings::new; + } + + @Override + protected AzureAiStudioEmbeddingsServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AzureAiStudioEmbeddingsServiceSettings mutateInstance(AzureAiStudioEmbeddingsServiceSettings instance) throws IOException { + return randomValueOtherThan(instance, AzureAiStudioEmbeddingsServiceSettingsTests::createRandom); + } + + @Override + protected AzureAiStudioEmbeddingsServiceSettings mutateInstanceForVersion( + AzureAiStudioEmbeddingsServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + private static AzureAiStudioEmbeddingsServiceSettings createRandom() { + return new AzureAiStudioEmbeddingsServiceSettings( + randomAlphaOfLength(10), + randomFrom(AzureAiStudioProvider.values()), + randomFrom(AzureAiStudioEndpointType.values()), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomBoolean(), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomFrom(new SimilarityMeasure[] { null, randomFrom(SimilarityMeasure.values()) }), + RateLimitSettingsTests.createRandom() + ); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsTaskSettingsTests.java index 3d1b7f0c7499c..5b8c95edcc0ad 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsTaskSettingsTests.java @@ -7,13 +7,15 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.embeddings; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants; import org.hamcrest.MatcherAssert; @@ -23,7 +25,7 @@ import static org.hamcrest.Matchers.is; -public class AzureAiStudioEmbeddingsTaskSettingsTests extends ESTestCase { +public class AzureAiStudioEmbeddingsTaskSettingsTests extends AbstractBWCWireSerializationTestCase { public void testFromMap_WithUser() { assertEquals( @@ -98,4 +100,31 @@ public static Map getTaskSettingsMap(@Nullable String user) { } return map; } + + @Override + protected Writeable.Reader instanceReader() { + return AzureAiStudioEmbeddingsTaskSettings::new; + } + + @Override + protected AzureAiStudioEmbeddingsTaskSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AzureAiStudioEmbeddingsTaskSettings mutateInstance(AzureAiStudioEmbeddingsTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, AzureAiStudioEmbeddingsTaskSettingsTests::createRandom); + } 
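[Editor's note] The test conversions above all follow the same recipe: extend AbstractBWCWireSerializationTestCase, hand the base class a reader, a random-instance factory, and a mutator, and let it drive the wire round trips. A minimal sketch of the check those overrides plug into — an illustration, not the real base-class source; it assumes the StreamInput constructor that instanceReader() points at:

    private void assertBwcRoundTrip(AzureAiStudioEmbeddingsTaskSettings instance, TransportVersion version) throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            output.setTransportVersion(version);      // serialize as a node on the older version would
            instance.writeTo(output);
            try (StreamInput input = output.bytes().streamInput()) {
                input.setTransportVersion(version);   // deserialize with the same wire format
                var read = new AzureAiStudioEmbeddingsTaskSettings(input);
                // mutateInstanceForVersion models which fields survive the older wire format;
                // these settings return the instance unchanged, i.e. they round-trip on all versions
                assertEquals(mutateInstanceForVersion(instance, version), read);
            }
        }
    }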
+ + @Override + protected AzureAiStudioEmbeddingsTaskSettings mutateInstanceForVersion( + AzureAiStudioEmbeddingsTaskSettings instance, + TransportVersion version + ) { + return instance; + } + + private static AzureAiStudioEmbeddingsTaskSettings createRandom() { + return new AzureAiStudioEmbeddingsTaskSettings(randomFrom(new String[] { null, randomAlphaOfLength(15) })); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index de474ea1b4237..c3e8eb5c621d2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -54,6 +54,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -394,7 +395,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe getAzureOpenAiRequestTaskSettingsMap("user"), getAzureOpenAiSecretSettingsMap("secret", null) ); - persistedConfig.secrets.put("extra_key", "value"); + persistedConfig.secrets().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( "id", @@ -1159,25 +1160,22 @@ private Map getRequestConfigMap( ); } - private PeristedConfig getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings ) { - return new PeristedConfig( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) ); } - private PeristedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - - return new PeristedConfig( + private PersistedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), null ); } - - private record PeristedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index 5b3cb9eade9de..e28ca71c30ff8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -58,6 +58,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -421,7 +422,7 @@ public void 
testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe getTaskSettingsMap(null, null), getSecretSettingsMap("secret") ); - persistedConfig.secrets.put("extra_key", "value"); + persistedConfig.secrets().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( "id", @@ -1394,6 +1395,4 @@ private PersistedConfig getPersistedConfigMap(Map serviceSetting null ); } - - private record PersistedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java index 1cdd7997b96c0..45dd8ad7b33bd 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java @@ -57,6 +57,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -963,7 +964,4 @@ private PersistedConfig getPersistedConfigMap(Map serviceSetting null ); } - - private record PersistedConfig(Map config, Map secrets) {} - } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java index a855437ce0738..de5c7ec83d57e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java @@ -56,6 +56,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResultsTests.asMapWithListsInsteadOfArrays; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -278,7 +279,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); - persistedConfig.secrets.put("extra_key", "value"); + persistedConfig.secrets().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( "id", @@ -711,18 +712,15 @@ private Map getRequestConfigMap(Map serviceSetti return new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings)); } - private HuggingFaceServiceTests.PeristedConfig getPersistedConfigMap(Map serviceSettings) { + private PersistedConfig getPersistedConfigMap(Map serviceSettings) { return 
getPersistedConfigMap(serviceSettings, Map.of(), null); } - private HuggingFaceServiceTests.PeristedConfig getPersistedConfigMap( - Map serviceSettings, - @Nullable Map secretSettings - ) { + private PersistedConfig getPersistedConfigMap(Map serviceSettings, @Nullable Map secretSettings) { return getPersistedConfigMap(serviceSettings, Map.of(), secretSettings); } - private HuggingFaceServiceTests.PeristedConfig getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings @@ -730,11 +728,9 @@ private HuggingFaceServiceTests.PeristedConfig getPersistedConfigMap( var secrets = secretSettings == null ? null : new HashMap(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)); - return new HuggingFaceServiceTests.PeristedConfig( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), secrets ); } - - private record PeristedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java index 508d5a97fe564..ba37203d9e5d6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java @@ -54,6 +54,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -586,28 +587,18 @@ private Map getRequestConfigMap( ); } - private record PeristedConfigRecord(Map config, Map secrets) {} - - private PeristedConfigRecord getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings ) { - return new PeristedConfigRecord( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) ); } - private PeristedConfigRecord getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - - return new PeristedConfigRecord( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - null - ); - } - private static Map getEmbeddingsServiceSettingsMap( String model, @Nullable Integer dimensions, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index e0e1ee3e81aef..2fc049dd3a5f6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -55,6 +55,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static 
org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -480,7 +481,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe getTaskSettingsMap("user"), getSecretSettingsMap("secret") ); - persistedConfig.secrets.put("extra_key", "value"); + persistedConfig.secrets().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( "id", @@ -1308,25 +1309,23 @@ private Map getRequestConfigMap( ); } - private PeristedConfig getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings ) { - return new PeristedConfig( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) ); } - private PeristedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { + private PersistedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - return new PeristedConfig( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), null ); } - - private record PeristedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index 81abe3dc5c088..33efabf101be7 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -28,9 +27,10 @@ import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.mapper.CompositeSyntheticFieldLoader; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; +import org.elasticsearch.index.mapper.IgnoreMalformedStoredValues; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; @@ -43,7 +43,6 @@ import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.index.mapper.TimeSeriesParams.MetricType; import org.elasticsearch.index.mapper.ValueFetcher; -import org.elasticsearch.index.mapper.XContentDataHelper; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import 
org.elasticsearch.script.ScriptCompiler; @@ -53,6 +52,7 @@ import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.sort.BucketedSort; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xcontent.CopyingXContentParser; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentSubParser; @@ -592,9 +592,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio EnumMap metricsParsed = new EnumMap<>(Metric.class); // Preserves the content of the field in order to be able to construct synthetic source // if field value is malformed. - XContentBuilder malformedContentForSyntheticSource = context.mappingLookup().isSourceSynthetic() && ignoreMalformed - ? XContentBuilder.builder(context.parser().contentType().xContent()) - : null; + XContentBuilder malformedDataForSyntheticSource = null; try { token = context.parser().currentToken(); @@ -603,11 +601,14 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio return; } ensureExpectedToken(XContentParser.Token.START_OBJECT, token, context.parser()); - subParser = new XContentSubParser(context.parser()); - token = subParser.nextToken(); - if (malformedContentForSyntheticSource != null) { - malformedContentForSyntheticSource.startObject(); + if (context.mappingLookup().isSourceSynthetic() && ignoreMalformed) { + var copyingParser = new CopyingXContentParser(context.parser()); + malformedDataForSyntheticSource = copyingParser.getBuilder(); + subParser = new XContentSubParser(copyingParser); + } else { + subParser = new XContentSubParser(context.parser()); } + token = subParser.nextToken(); while (token != XContentParser.Token.END_OBJECT) { // should be an object sub-field with name a metric name ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, subParser); @@ -621,9 +622,6 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } token = subParser.nextToken(); - if (malformedContentForSyntheticSource != null) { - malformedContentForSyntheticSource.field(fieldName); - } // Make sure that the value is a number. Probably this will change when // new aggregate metric types are added (histogram, cardinality etc) ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, subParser); @@ -632,9 +630,6 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio try { Number metricValue = delegateFieldMapper.value(context.parser()); metricsParsed.put(metric, metricValue); - if (malformedContentForSyntheticSource != null) { - malformedContentForSyntheticSource.value(metricValue); - } } catch (IllegalArgumentException e) { throw new IllegalArgumentException("failed to parse [" + metric.name() + "] sub field: " + e.getMessage(), e); } @@ -677,24 +672,20 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } } catch (Exception e) { if (ignoreMalformed) { - if (malformedContentForSyntheticSource != null) { - if (subParser != null) { - // Remaining data in parser needs to be stored as is in order to provide it in synthetic source. 
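[Editor's note] The new ignore_malformed flow above replaces the hand-maintained copy of the partially consumed object with a CopyingXContentParser that records everything read through it. A condensed fragment of that capture pattern, using only the calls visible in the hunk; 'parser' stands for context.parser() positioned on the object's START_OBJECT token, and the builder lives in an outer scope so the catch block can still use it, mirroring the mapper:

    XContentBuilder malformedCopy = null;
    XContentSubParser sub = null;
    try {
        CopyingXContentParser copying = new CopyingXContentParser(parser);
        malformedCopy = copying.getBuilder();         // fills up as tokens are read through the wrapper
        sub = new XContentSubParser(copying);
        for (var token = sub.nextToken(); token != XContentParser.Token.END_OBJECT; token = sub.nextToken()) {
            // normal metric parsing; no per-field copy calls needed any more
        }
    } catch (Exception e) {
        if (sub != null) {
            sub.close();                              // skips to the end of the object, completing the copy
        }
        // malformedCopy now holds the object verbatim; the mapper stores it via
        // IgnoreMalformedStoredValues.storedField(name(), malformedCopy)
    }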
- XContentHelper.drainAndClose(subParser, malformedContentForSyntheticSource); - } else { - // We don't use DrainingXContentParser since we don't want to go beyond current field - malformedContentForSyntheticSource.copyCurrentStructure(context.parser()); - } - ; - var nameValue = IgnoredSourceFieldMapper.NameValue.fromContext( - context, - name(), - XContentDataHelper.encodeXContentBuilder(malformedContentForSyntheticSource) - ); - context.addIgnoredField(nameValue); - } else if (subParser != null) { + if (subParser != null) { // close the subParser, so we advance to the end of the object subParser.close(); + } else { + if (context.mappingLookup().isSourceSynthetic()) { + // There is a malformed value, but it is not an object (since subParser is null). + // So we just need to copy this single value. + malformedDataForSyntheticSource = XContentBuilder.builder(context.parser().contentType().xContent()) + .copyCurrentStructure(context.parser()); + } + } + + if (malformedDataForSyntheticSource != null) { + context.doc().add(IgnoreMalformedStoredValues.storedField(name(), malformedDataForSyntheticSource)); } context.addIgnoredField(name()); @@ -724,11 +715,15 @@ protected SyntheticSourceMode syntheticSourceMode() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - // Note that malformed values are handled via `IgnoredSourceFieldMapper` infrastructure - return new AggregateMetricSyntheticFieldLoader(name(), simpleName(), metrics); + return new CompositeSyntheticFieldLoader( + simpleName(), + name(), + new AggregateMetricSyntheticFieldLoader(name(), simpleName(), metrics), + new CompositeSyntheticFieldLoader.MalformedValuesLayer(name()) + ); } - public static class AggregateMetricSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + public static class AggregateMetricSyntheticFieldLoader implements CompositeSyntheticFieldLoader.SyntheticFieldLoaderLayer { private final String name; private final String simpleName; private final EnumSet metrics; @@ -746,6 +741,11 @@ public String fieldName() { return name; } + @Override + public long valueCount() { + return hasValue() ? 
1 : 0; + } + @Override public Stream> storedFieldLoaders() { return Stream.of(); @@ -779,7 +779,7 @@ public void write(XContentBuilder b) throws IOException { if (metricHasValue.isEmpty()) { return; } - b.startObject(simpleName); + b.startObject(); for (Map.Entry entry : metricDocValues.entrySet()) { if (metricHasValue.contains(entry.getKey())) { String metricName = entry.getKey().name(); diff --git a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java index f46508093c4ec..5fbc25eb037a7 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java @@ -8,6 +8,8 @@ import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.LuceneDocument; @@ -20,6 +22,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; import org.elasticsearch.xpack.aggregatemetric.mapper.AggregateDoubleMetricFieldMapper.Metric; import org.hamcrest.Matchers; @@ -523,6 +526,43 @@ protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + public void testArrayValueSyntheticSource() throws Exception { + DocumentMapper mapper = createDocumentMapper( + syntheticSourceFieldMapping( + b -> b.field("type", CONTENT_TYPE) + .array("metrics", "min", "max") + .field("default_metric", "min") + .field("ignore_malformed", "true") + ) + ); + + var randomString = randomAlphaOfLength(10); + CheckedConsumer arrayValue = b -> { + b.startArray("field"); + { + b.startObject().field("max", 100).field("min", 10).endObject(); + b.startObject().field("max", 200).field("min", 20).endObject(); + b.value(randomString); + } + b.endArray(); + }; + + var expected = JsonXContent.contentBuilder().startObject(); + // First value comes from synthetic field loader and so is formatted in a specific format (e.g. min always come first). + // Other values are stored as is as part of ignore_malformed logic for synthetic source. 
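[Editor's note] That mixed formatting falls out of the loader layering introduced in the mapper hunk above: the composite owns the field name and asks each layer for anonymous objects, which is why AggregateMetricSyntheticFieldLoader.write() switched from b.startObject(simpleName) to b.startObject(), and why layers now report a valueCount() so the composite can decide between a single object and an array. The wiring, restated with comments:

    // The composite writes the field name once, then delegates: well-formed
    // values come from the metric layer, malformed ones from the stored-field
    // layer populated by IgnoreMalformedStoredValues at index time.
    return new CompositeSyntheticFieldLoader(
        simpleName(),
        name(),
        new AggregateMetricSyntheticFieldLoader(name(), simpleName(), metrics),
        new CompositeSyntheticFieldLoader.MalformedValuesLayer(name())
    );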
+ { + expected.startArray("field"); + expected.startObject().field("min", 10.0).field("max", 100.0).endObject(); + expected.startObject().field("max", 200).field("min", 20).endObject(); + expected.value(randomString); + expected.endArray(); + } + expected.endObject(); + + var syntheticSource = syntheticSource(mapper, arrayValue); + assertEquals(Strings.toString(expected), syntheticSource); + } + protected final class AggregateDoubleMetricSyntheticSourceSupport implements SyntheticSourceSupport { private final boolean malformedExample; private final EnumSet storedMetrics; diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 861d5a8c2f592..afd150be5fd4c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -270,7 +270,7 @@ protected Set excludeTemplates() { } protected void cleanUpResources() { - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); } protected void setUpgradeModeTo(boolean enabled) { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java index 19dad8db8ef01..1b1ad986bc8a1 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java @@ -134,7 +134,7 @@ public void testMLFeatureReset() throws Exception { createdPipelines.remove("feature_reset_inference_pipeline"); assertBusy(() -> assertThat(countInferenceProcessors(clusterAdmin().prepareState().get().getState()), equalTo(0))); - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertBusy(() -> { List indices = Arrays.asList(client().admin().indices().prepareGetIndex().addIndices(".ml*").get().indices()); assertThat(indices.toString(), indices, is(empty())); @@ -150,7 +150,7 @@ public void testMLFeatureResetFailureDueToPipelines() throws Exception { createdPipelines.add("feature_reset_failure_inference_pipeline"); Exception ex = expectThrows( Exception.class, - () -> client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet() + () -> client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet() ); assertThat( ex.getMessage(), @@ -166,7 +166,7 @@ public void testMLFeatureResetFailureDueToPipelines() throws Exception { public void testMLFeatureResetWithModelDeployment() throws Exception { createModelDeployment(); - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + 
client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertBusy(() -> { List indices = Arrays.asList(client().admin().indices().prepareGetIndex().addIndices(".ml*").get().indices()); assertThat(indices.toString(), indices, is(empty())); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java index 18cbf1728b0e4..c6e573fb3ea9c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java @@ -112,7 +112,7 @@ protected Collection> getPlugins() { public void tearDown() throws Exception { try { logger.trace("[{}#{}]: ML-specific after test cleanup", getTestClass().getSimpleName(), getTestName()); - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); } finally { super.tearDown(); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index 277a395471cb5..12eeaf8732235 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring * mappings that point to the corresponding ECS fields. 
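[Editor's note] Stepping back to the ML test hunks just above: ResetFeatureStateRequest no longer has a no-arg constructor, so every call site passes a timeout explicitly — presumably the master-node timeout — with the tests supplying the shared ESTestCase constant. The one-line migration, for reference:

    // Before: new ResetFeatureStateRequest()
    // After: the request carries its (presumed master-node) timeout explicitly
    client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet();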
*/ - public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 16; + public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 17; private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version"; private static final String STACK_TEMPLATE_VERSION = "8"; private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version"; diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java index a482cd2c364e2..79227f3dd2cee 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java +++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java @@ -46,7 +46,8 @@ public void testFeatureUsage() throws Exception { ArchiveFeatureSetUsage archiveUsage = (ArchiveFeatureSetUsage) usage.getUsage(); assertEquals(0, archiveUsage.getNumberOfArchiveIndices()); - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().restoreSnapshot(req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -70,7 +71,8 @@ public void testFailRestoreOnInvalidLicense() throws Exception { ensureClusterSizeConsistency(); ensureClusterStateConsistency(); - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, () -> clusterAdmin().restoreSnapshot(req).actionGet() @@ -84,7 +86,8 @@ public void testFailRestoreOnTooOldVersion() { TestRepositoryPlugin.FAKE_VERSIONS_TYPE, Settings.builder().put(getRepositoryOnMaster(repoName).getMetadata().settings()).put("version", Version.fromString("2.0.0").id) ); - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); SnapshotRestoreException e = expectThrows(SnapshotRestoreException.class, () -> clusterAdmin().restoreSnapshot(req).actionGet()); assertThat( e.getMessage(), @@ -94,7 +97,8 @@ public void testFailRestoreOnTooOldVersion() { // checks that shards are failed if license becomes invalid after successful restore public void testShardAllocationOnInvalidLicense() throws Exception { - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); final RestoreSnapshotResponse restoreSnapshotResponse = 
clusterAdmin().restoreSnapshot(req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveSettingValidationIntegTests.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveSettingValidationIntegTests.java index 3104c91f2e5f1..df86d94da5037 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveSettingValidationIntegTests.java +++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveSettingValidationIntegTests.java @@ -19,7 +19,8 @@ public class ArchiveSettingValidationIntegTests extends AbstractArchiveTestCase { public void testCannotRemoveWriteBlock() throws ExecutionException, InterruptedException { - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().restoreSnapshot(req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index ec7ca2ae5b681..7ce1da3a07917 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -239,7 +239,7 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); - final Map snapshotShards = clusterAdmin().prepareSnapshotStatus(fsRepoName) + final Map snapshotShards = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, fsRepoName) .setSnapshots(snapshotInfo.snapshotId().getName()) .get() .getSnapshots() diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 13e5833b133d5..c738033761b3e 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -223,7 +223,10 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), 
equalTo(0)); - final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(fsRepoName).get().repositories().get(0); + final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, fsRepoName) + .get() + .repositories() + .get(0); assertThat(repositoryMetadata.name(), equalTo(fsRepoName)); assertThat(repositoryMetadata.uuid(), not(equalTo(RepositoryData.MISSING_UUID))); @@ -657,7 +660,7 @@ public void testSnapshotMountedIndexLeavesBlobsUntouched() throws Exception { final SnapshotId snapshotOne = createSnapshot(repositoryName, "snapshot-1", List.of(indexName)).snapshotId(); assertAcked(indicesAdmin().prepareDelete(indexName)); - final SnapshotStatus snapshotOneStatus = clusterAdmin().prepareSnapshotStatus(repositoryName) + final SnapshotStatus snapshotOneStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName) .setSnapshots(snapshotOne.getName()) .get() .getSnapshots() @@ -669,7 +672,7 @@ public void testSnapshotMountedIndexLeavesBlobsUntouched() throws Exception { ensureGreen(indexName); final SnapshotId snapshotTwo = createSnapshot(repositoryName, "snapshot-2", List.of(indexName)).snapshotId(); - final SnapshotStatus snapshotTwoStatus = clusterAdmin().prepareSnapshotStatus(repositoryName) + final SnapshotStatus snapshotTwoStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName) .setSnapshots(snapshotTwo.getName()) .get() .getSnapshots() @@ -792,7 +795,12 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr createRepositoryNoVerify(tmpRepositoryName, "fs"); final Path repoPath = internalCluster().getCurrentMasterNodeInstance(Environment.class) .resolveRepoFile( - clusterAdmin().prepareGetRepositories(tmpRepositoryName).get().repositories().get(0).settings().get("location") + clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, tmpRepositoryName) + .get() + .repositories() + .get(0) + .settings() + .get("location") ); initWithSnapshotVersion( tmpRepositoryName, @@ -803,12 +811,12 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr SnapshotsService.INDEX_GEN_IN_REPO_DATA_VERSION ) ); - assertAcked(clusterAdmin().prepareDeleteRepository(tmpRepositoryName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, tmpRepositoryName)); createRepository(repositoryName, "fs", repoPath); } final SnapshotId snapshotOne = createSnapshot(repositoryName, "snapshot-1", List.of(indexName)).snapshotId(); - for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(repositoryName) + for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName) .setSnapshots(snapshotOne.getName()) .get() .getSnapshots()) { @@ -822,7 +830,7 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr assertAcked(indicesAdmin().prepareDelete(indexName)); assertThat( - clusterAdmin().prepareGetRepositories(repositoryName).get().repositories().get(0).uuid(), + clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repositoryName).get().repositories().get(0).uuid(), hasRepositoryUuid ? 
not(equalTo(RepositoryData.MISSING_UUID)) : equalTo(RepositoryData.MISSING_UUID) ); @@ -847,7 +855,7 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr logger.info("--> starting to take snapshot-2"); final SnapshotId snapshotTwo = createSnapshot(backupRepositoryName, "snapshot-2", List.of(restoredIndexName)).snapshotId(); logger.info("--> finished taking snapshot-2"); - for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(backupRepositoryName) + for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, backupRepositoryName) .setSnapshots(snapshotTwo.getName()) .get() .getSnapshots()) { @@ -867,17 +875,23 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr final String restoreRepositoryName; if (hasRepositoryUuid && randomBoolean()) { // Re-mount the repository containing the actual data under a different name - final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(repositoryName).get().repositories().get(0); + final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repositoryName) + .get() + .repositories() + .get(0); // Rename the repository containing the actual data. final String newRepositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - assertAcked(clusterAdmin().prepareDeleteRepository(repositoryName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName)); final Settings.Builder settings = Settings.builder().put(repositoryMetadata.settings()); if (randomBoolean()) { settings.put(READONLY_SETTING_KEY, "true"); } assertAcked( - clusterAdmin().preparePutRepository(newRepositoryName).setType("fs").setSettings(settings).setVerify(randomBoolean()) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, newRepositoryName) + .setType("fs") + .setSettings(settings) + .setVerify(randomBoolean()) ); restoreRepositoryName = backupRepositoryName.equals(repositoryName) ? 
newRepositoryName : backupRepositoryName; } else { @@ -886,7 +900,7 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr logger.info("--> starting to restore snapshot-2"); assertThat( - clusterAdmin().prepareRestoreSnapshot(restoreRepositoryName, snapshotTwo.getName()) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, restoreRepositoryName, snapshotTwo.getName()) .setIndices(restoredIndexName) .get() .status(), @@ -967,13 +981,19 @@ public void testSnapshotOfSearchableSnapshotCanBeRestoredBeforeRepositoryRegiste final SnapshotId backupSnapshot = createSnapshot(backupRepoName, "backup-snapshot", List.of(restoredIndexName)).snapshotId(); // Clear out data & the repo that contains it - final RepositoryMetadata dataRepoMetadata = clusterAdmin().prepareGetRepositories(dataRepoName).get().repositories().get(0); + final RepositoryMetadata dataRepoMetadata = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, dataRepoName) + .get() + .repositories() + .get(0); assertAcked(indicesAdmin().prepareDelete(restoredIndexName)); - assertAcked(clusterAdmin().prepareDeleteRepository(dataRepoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataRepoName)); // Restore the backup snapshot assertThat( - clusterAdmin().prepareRestoreSnapshot(backupRepoName, backupSnapshot.getName()).setIndices(restoredIndexName).get().status(), + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, backupRepoName, backupSnapshot.getName()) + .setIndices(restoredIndexName) + .get() + .status(), equalTo(RestStatus.ACCEPTED) ); @@ -1011,7 +1031,11 @@ public void testSnapshotOfSearchableSnapshotCanBeRestoredBeforeRepositoryRegiste if (randomBoolean()) { settings.put(READONLY_SETTING_KEY, "true"); } - assertAcked(clusterAdmin().preparePutRepository(newRepositoryName).setType("fs").setSettings(settings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, newRepositoryName) + .setType("fs") + .setSettings(settings) + ); ensureGreen(restoredIndexName); assertTotalHits(restoredIndexName, originalAllHits, originalBarHits); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java index f97151f9ae330..a3da932398fb1 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java @@ -71,7 +71,7 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered } assertAcked( - clusterAdmin().preparePutRepository(repositoryName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType(FsRepository.TYPE) .setSettings( Settings.builder() @@ -86,12 +86,13 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered final String snapshotWithMountedIndices = snapshotName + "-with-mounted-indices"; createSnapshot(repositoryName, snapshotWithMountedIndices, Arrays.asList(mountedIndices)); assertAcked(indicesAdmin().prepareDelete(mountedIndices)); - 
assertAcked(clusterAdmin().prepareDeleteRepository(repositoryName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName)); updatedRepositoryName = repositoryName + "-with-mounted-indices"; createRepository(updatedRepositoryName, FsRepository.TYPE, repositorySettings, randomBoolean()); final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, updatedRepositoryName, snapshotWithMountedIndices ).setWaitForCompletion(true).setIndices(mountedIndices).get(); @@ -103,7 +104,7 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered for (int i = 0; i < nbMountedIndices; i++) { RepositoryConflictException exception = expectThrows( RepositoryConflictException.class, - () -> clusterAdmin().prepareDeleteRepository(updatedRepositoryName).get() + () -> clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, updatedRepositoryName).get() ); assertThat( exception.getMessage(), @@ -118,7 +119,7 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered assertAcked(indicesAdmin().prepareDelete(mountedIndices[i])); } - assertAcked(clusterAdmin().prepareDeleteRepository(updatedRepositoryName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, updatedRepositoryName)); } public void testMountIndexWithDeletionOfSnapshotFailsIfNotSingleIndexSnapshot() throws Exception { @@ -299,7 +300,7 @@ public void testRestoreSearchableSnapshotIndexConflicts() throws Exception { logger.info("--> restoring snapshot of searchable snapshot index [{}] should be conflicting", mountedIndex); final SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repository, snapshotOfMountedIndex) + () -> clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshotOfMountedIndex) .setIndices(mountedIndex) .setWaitForCompletion(true) .get() @@ -360,7 +361,7 @@ public void testRestoreSearchableSnapshotIndexWithDifferentSettingsConflicts() t : randomSubsetOf(randomIntBetween(1, nbMountedIndices), mountedIndices); final SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repository, snapshotOfMountedIndices) + () -> clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshotOfMountedIndices) .setIndices(restorables.toArray(String[]::new)) .setIndexSettings(deleteSnapshotIndexSettings(deleteSnapshot == false)) .setRenameReplacement("restored-with-different-setting-$1") @@ -380,7 +381,11 @@ public void testRestoreSearchableSnapshotIndexWithDifferentSettingsConflicts() t ) ); - final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repository, snapshotOfMountedIndices) + final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + snapshotOfMountedIndices + ) .setIndices(restorables.toArray(String[]::new)) .setIndexSettings(indexSettings) .setRenameReplacement("restored-with-same-setting-$1") diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java index 
4f7c7f7aa0b74..0811ee86b3c32 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java @@ -55,8 +55,8 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { assertAcked(indicesAdmin().prepareDelete("mounted-*")); - assertAcked(clusterAdmin().prepareDeleteSnapshot("repository", "snapshot").get()); - assertAcked(clusterAdmin().prepareDeleteRepository("repository")); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snapshot").get()); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repository")); super.tearDown(); } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUuidValidationIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUuidValidationIntegTests.java index c352c37ccadf8..3a90a2b23abc6 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUuidValidationIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUuidValidationIntegTests.java @@ -113,7 +113,7 @@ public void testMountFailsIfSnapshotChanged() throws Exception { final RestoreBlockingActionFilter restoreBlockingActionFilter = getBlockingActionFilter(); restoreBlockingActionFilter.awaitExecution(); - assertAcked(clusterAdmin().prepareDeleteSnapshot(fsRepoName, snapshotName).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, fsRepoName, snapshotName).get()); createFullSnapshot(fsRepoName, snapshotName); assertFalse(responseFuture.isDone()); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java index 73e2e56b31ca5..a34bcd16c375b 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java @@ -150,14 +150,14 @@ private int createIndices() throws InterruptedException { private void createRepository(String name, String type) { assertAcked( - clusterAdmin().preparePutRepository(name) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name) .setType(type) .setSettings(Settings.builder().put("location", randomRepoPath()).build()) ); } private void createSnapshot(String repository, String snapshot, int nbIndices) { - var snapshotInfo = clusterAdmin().prepareCreateSnapshot(repository, snapshot) + var snapshotInfo = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot) .setIndices("index-*") .setIncludeGlobalState(false) .setWaitForCompletion(true) 
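[Editor's note] The same explicit-timeout migration runs through all of the searchable-snapshots tests in this section: every cluster-admin snapshot and repository helper now takes TEST_REQUEST_TIMEOUT up front, and the repository CRUD helpers take it twice — presumably once for the master-node timeout and once for the ack timeout. A sketch of the resulting call shapes, with hypothetical repository and snapshot names:

    // Repository CRUD: two timeouts (presumed master-node + ack).
    assertAcked(
        clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-repo")
            .setType(FsRepository.TYPE)
            .setSettings(Settings.builder().put("location", randomRepoPath()).build())
    );
    // Read-side helpers: a single (presumed master-node) timeout.
    var snapshots = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "my-repo")
        .setSnapshots("my-snapshot")
        .get()
        .getSnapshots();
    assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-repo"));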
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java index 56074f97650f0..40b7e08936fa3 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java @@ -242,7 +242,7 @@ public void testPeriodicMaintenance() throws Exception { assertAcked(systemClient().admin().indices().prepareDelete(SNAPSHOT_BLOB_CACHE_INDEX)); assertAcked(indicesAdmin().prepareDelete(indicesToDelete.toArray(String[]::new))); - assertAcked(clusterAdmin().prepareDeleteRepository("repo")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo")); ensureClusterStateConsistency(); assertThat(numberOfEntriesInCache(), equalTo(0L)); @@ -253,7 +253,7 @@ public void testPeriodicMaintenance() throws Exception { ); try { // restores the .snapshot-blob-cache index with - now obsolete - documents - final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot("backup", "backup") + final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "backup", "backup") // We only want to restore the blob cache index. Since we can't do that by name, specify an index that doesn't exist and // allow no indices - this way, only the indices resolved from the feature state will be resolved. 
.setIndices("this-index-doesnt-exist-i-know-because-#-is-illegal-in-index-names") @@ -394,7 +394,7 @@ private Map> mountRandomIndicesWithCache(String re } else { logger.info("--> mounted index [{}] did not generate any entry in cache", mountedIndex); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repositoryName, snapshot).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot).get()); assertAcked(indicesAdmin().prepareDelete(mountedIndex)); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java index 42542b63c80d1..ab38a89870500 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java @@ -153,14 +153,18 @@ public void testConcurrentPrewarming() throws Exception { } logger.debug("--> registering repository"); - assertAcked(clusterAdmin().preparePutRepository("repository").setType(FsRepository.TYPE).setSettings(repositorySettings.build())); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repository") + .setType(FsRepository.TYPE) + .setSettings(repositorySettings.build()) + ); logger.debug("--> snapshotting indices"); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("repository", "snapshot") - .setIncludeGlobalState(false) - .setIndices("index-*") - .setWaitForCompletion(true) - .get(); + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "repository", + "snapshot" + ).setIncludeGlobalState(false).setIndices("index-*").setWaitForCompletion(true).get(); final int totalShards = shardsPerIndex.values().stream().mapToInt(i -> i).sum(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(totalShards)); @@ -172,11 +176,14 @@ public void testConcurrentPrewarming() throws Exception { assertAcked(indicesAdmin().prepareDelete("index-*")); logger.debug("--> deleting repository"); - assertAcked(clusterAdmin().prepareDeleteRepository("repository")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repository")); logger.debug("--> registering tracking repository"); assertAcked( - clusterAdmin().preparePutRepository("repository").setType("tracking").setVerify(false).setSettings(repositorySettings.build()) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repository") + .setType("tracking") + .setVerify(false) + .setSettings(repositorySettings.build()) ); TrackingRepositoryPlugin tracker = getTrackingRepositoryPlugin(); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java index 18e9a500a77ad..bdcce1e518700 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java @@ -237,7 +237,7 @@ protected void masterOperation( dataTierAllocationSetting.get(indexSettings); } - RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repoName, snapName) + RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(request.masterNodeTimeout(), repoName, snapName) // Restore the single index specified .indices(indexName) // Always rename it to the desired mounted index name @@ -253,8 +253,6 @@ protected void masterOperation( .includeAliases(false) // Pass through the wait-for-completion flag .waitForCompletion(request.waitForCompletion()) - // Pass through the master-node timeout - .masterNodeTimeout(request.masterNodeTimeout()) // Fail the restore if the snapshot found above is swapped out from under us before the restore happens .snapshotUuid(snapshotId.getUUID()) // Log snapshot restore at the DEBUG log level diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java index 532841ecf2172..f17a0552f5834 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java @@ -253,7 +253,9 @@ public void testThatSnapshotAndRestore() throws Exception { private void waitForSnapshotToFinish(String repo, String snapshot) throws Exception { assertBusy(() -> { - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo).setSnapshots(snapshot).get(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repo) + .setSnapshots(snapshot) + .get(); assertThat(response.getSnapshots().get(0).getState(), is(SnapshotsInProgress.State.SUCCESS)); // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for // it to disappear from the cluster state as well diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureResetTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureResetTests.java index 74ffe762d980b..38687540e79f3 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureResetTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureResetTests.java @@ -97,7 +97,7 @@ public void testFeatureResetManageRole() { } public void testFeatureResetNoManageRole() { - final ResetFeatureStateRequest req = new ResetFeatureStateRequest(); + final ResetFeatureStateRequest req = new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT); client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("usr", SUPER_USER_PASSWD))) .admin() @@ -124,7 +124,7 @@ public void onFailure(Exception e) { } private void assertResetSuccessful(String user, SecureString password) { - final ResetFeatureStateRequest req = new ResetFeatureStateRequest(); 
+ final ResetFeatureStateRequest req = new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT); client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(user, password))) .admin() diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java index daea0e38c2c40..2ca799e94874c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java @@ -68,7 +68,7 @@ protected Settings nodeSettings() { public void testSecurityFeatureStateSnapshotAndRestore() throws Exception { // set up a snapshot repository final String repositoryName = "test-repo"; - clusterAdmin().preparePutRepository(repositoryName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType("fs") .setSettings(Settings.builder().put("location", repositoryLocation)) .get(); @@ -105,7 +105,7 @@ public void testSecurityFeatureStateSnapshotAndRestore() throws Exception { // snapshot state final String snapshotName = "security-state"; - clusterAdmin().prepareCreateSnapshot(repositoryName, snapshotName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) .setIndices("test_index") .setFeatureStates("LocalStateSecurity") .get(); @@ -131,7 +131,7 @@ public void testSecurityFeatureStateSnapshotAndRestore() throws Exception { client().admin().indices().prepareClose("test_index").get(); // restore state - clusterAdmin().prepareRestoreSnapshot(repositoryName, snapshotName) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) .setFeatureStates("LocalStateSecurity") .setIndices("test_index") .setWaitForCompletion(true) @@ -168,7 +168,9 @@ private Response performAuthenticatedRequest(Request request, String token) thro private void waitForSnapshotToFinish(String repo, String snapshot) throws Exception { assertBusy(() -> { - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo).setSnapshots(snapshot).get(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repo) + .setSnapshots(snapshot) + .get(); assertThat(response.getSnapshots().get(0).getState(), is(SnapshotsInProgress.State.SUCCESS)); // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for // it to disappear from the cluster state as well diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 8692c999d8b35..3badd14ef8348 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -503,7 +503,7 @@ public void testSnapshotDeleteRestore() { ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository("test-repo") + 
clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("fs") .setSettings( Settings.builder() @@ -517,7 +517,7 @@ public void testSnapshotDeleteRestore() { SnapshotInfo snapshotInfo = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIncludeGlobalState(false) .setFeatureStates(SECURITY_FEATURE_NAME) @@ -540,7 +540,7 @@ public void testSnapshotDeleteRestore() { GetRolesResponse getRolesResponse = new GetRolesRequestBuilder(client()).names("test_role").get(); assertThat(getRolesResponse.roles().length, is(0)); // restore - RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-1") + RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIncludeAliases(randomBoolean()) // Aliases are always restored for system indices .setFeatureStates(SECURITY_FEATURE_NAME) @@ -566,7 +566,7 @@ public void testSnapshotDeleteRestore() { .prepareCreate("idx") .get(); assertThat(createIndexResponse.isAcknowledged(), is(true)); - assertAcked(clusterAdmin().prepareDeleteRepository("test-repo")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")); } public void testAuthenticateWithDeletedRole() { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java index f0858c81ac1c1..d04c2a4b0c578 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java @@ -46,7 +46,9 @@ public class SnapshotUserRoleIntegTests extends NativeRealmIntegTestCase { public void setupClusterBeforeSnapshot() throws IOException { logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository("repo").setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo") + .setType("fs") + .setSettings(Settings.builder().put("location", randomRepoPath())) ); logger.info("--> creating ordinary index"); @@ -68,7 +70,7 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { // view repositories final GetRepositoriesResponse getRepositoriesResponse = client.admin() .cluster() - .prepareGetRepositories(randomFrom("*", "_all")) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, randomFrom("*", "_all")) .get(); assertThat(getRepositoriesResponse.repositories().size(), is(1)); assertThat(getRepositoriesResponse.repositories().get(0).name(), is("repo")); @@ -83,7 +85,7 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { // create snapshot that includes restricted indices final CreateSnapshotResponse snapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("repo", "snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setIndices(randomFrom("_all", "*")) .setIndicesOptions(IndicesOptions.strictExpandHidden()) .setWaitForCompletion(true) 
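Reviewer note: the arity difference is easy to miss when scanning these security-test hunks. Read-style builders (get repositories, get snapshots, snapshot status) take a single master-node timeout, whereas acknowledged writes take master-node plus ack timeouts (verify repository, an outlier, also takes two, as the voting-only-node hunk further down shows). A short sketch under the same test-framework assumptions as above:

```java
// Sketch only: signatures mirror the hunks in this patch.
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
import org.elasticsearch.test.ESIntegTestCase;

public class TimeoutAritySketchIT extends ESIntegTestCase {
    public void testTimeoutArity() {
        // One timeout argument for reads:
        GetRepositoriesResponse repos = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "*").get();
        GetSnapshotsResponse snaps = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "repo").get();
        SnapshotsStatusResponse status = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "repo")
            .setSnapshots("snap")
            .get();
        assertFalse(repos.repositories().isEmpty());
        assertNotNull(snaps.getSnapshots());
        assertNotNull(status.getSnapshots());

        // Two (master-node + ack) for acknowledged writes:
        assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo"));
    }
}
```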
@@ -91,7 +93,7 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { assertThat(snapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS)); assertThat(snapshotResponse.getSnapshotInfo().indices(), containsInAnyOrder(INTERNAL_SECURITY_MAIN_INDEX_7, ordinaryIndex)); // view snapshots for repo - final GetSnapshotsResponse getSnapshotResponse = client.admin().cluster().prepareGetSnapshots("repo").get(); + final GetSnapshotsResponse getSnapshotResponse = client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "repo").get(); assertThat(getSnapshotResponse.getSnapshots().size(), is(1)); assertThat(getSnapshotResponse.getSnapshots().get(0).snapshotId().getName(), is("snap")); assertThat(getSnapshotResponse.getSnapshots().get(0).indices(), containsInAnyOrder(INTERNAL_SECURITY_MAIN_INDEX_7, ordinaryIndex)); @@ -127,7 +129,7 @@ public void testSnapshotUserRoleUnathorizedForDestructiveActions() { assertThrowsAuthorizationException( () -> client.admin() .cluster() - .preparePutRepository("some_other_repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "some_other_repo") .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath())) .get(), @@ -136,18 +138,24 @@ public void testSnapshotUserRoleUnathorizedForDestructiveActions() { ); // try delete repo assertThrowsAuthorizationException( - () -> client.admin().cluster().prepareDeleteRepository("repo").get(), + () -> client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo").get(), "cluster:admin/repository/delete", "snapshot_user" ); // try fumble with snapshots assertThrowsAuthorizationException( - () -> client.admin().cluster().prepareRestoreSnapshot("repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)).get(), + () -> client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)) + .get(), "cluster:admin/snapshot/restore", "snapshot_user" ); assertThrowsAuthorizationException( - () -> client.admin().cluster().prepareDeleteSnapshot("repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)).get(), + () -> client.admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)) + .get(), "cluster:admin/snapshot/delete", "snapshot_user" ); diff --git a/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json b/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json index c4c74f190ddb1..8d3881c73a95f 100644 --- a/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json +++ b/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json @@ -3,7 +3,6 @@ "order" : 1000, "settings" : { "number_of_shards" : 1, - "number_of_replicas" : 0, "auto_expand_replicas" : "0-all", "analysis" : { "filter" : { diff --git a/x-pack/plugin/slm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/slm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java index 26923c545570a..ff091d432e546 100644 --- a/x-pack/plugin/slm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/slm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -83,7 +83,7 @@ public void testSLMWithPermissions() throws Exception { createUser("slm_admin", 
"slm-admin-password", "slm-manage"); createUser("slm_user", "slm-user-password", "slm-read"); - PutRepositoryRequest repoRequest = new PutRepositoryRequest(); + PutRepositoryRequest repoRequest = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); Settings.Builder settingsBuilder = Settings.builder().put("location", "."); repoRequest.settings(settingsBuilder); repoRequest.name(repo); diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java index c68e7174923f8..d93f40f7c0a82 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java @@ -247,7 +247,7 @@ public void testSettingsApplied() throws Exception { } logger.info("--> create snapshot manually"); - var request = new CreateSnapshotRequest("repo", "file-snap").waitForCompletion(true); + var request = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, "repo", "file-snap").waitForCompletion(true); var response = clusterAdmin().createSnapshot(request).get(); RestStatus status = response.getSnapshotInfo().status(); assertEquals(RestStatus.OK, status); @@ -273,7 +273,7 @@ public void testSettingsApplied() throws Exception { // Cancel/delete the snapshot try { - clusterAdmin().prepareDeleteSnapshot(REPO, snapshotName).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName).get(); } catch (SnapshotMissingException e) { // ignore } diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java index a64df7f871d97..1d797095c1f69 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java @@ -150,7 +150,7 @@ public void testSnapshotInProgress() throws Exception { // Cancel/delete the snapshot try { - clusterAdmin().prepareDeleteSnapshot(REPO, snapshotName).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName).get(); } catch (SnapshotMissingException e) { // ignore } @@ -263,7 +263,7 @@ public void testRetentionWhileSnapshotInProgress() throws Exception { assertBusy(() -> { try { logger.info("--> cancelling snapshot {}", secondSnapName); - clusterAdmin().prepareDeleteSnapshot(REPO, secondSnapName).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, REPO, secondSnapName).get(); } catch (ConcurrentSnapshotExecutionException e) { logger.info("--> attempted to stop second snapshot", e); // just wait and retry @@ -385,7 +385,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex logger.info("--> verify that snapshot [{}] is {}", failedSnapshotName.get(), expectedUnsuccessfulState); assertBusy(() -> { try { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(failedSnapshotName.get()) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -432,7 +432,7 @@ private void 
testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex assertBusy(() -> { final SnapshotInfo snapshotInfo; try { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(successfulSnapshotName.get()) .get(); snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -446,7 +446,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex // Check that the failed snapshot from before still exists, now that retention has run { logger.info("--> verify that snapshot [{}] still exists", failedSnapshotName.get()); - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(failedSnapshotName.get()) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -465,7 +465,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex logger.info("--> waiting for {} snapshot [{}] to be deleted", expectedUnsuccessfulState, failedSnapshotName.get()); assertBusy(() -> { try { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(failedSnapshotName.get()) .get(); assertThat(snapshotsStatusResponse.getSnapshots(), empty()); @@ -478,7 +478,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex failedSnapshotName.get(), successfulSnapshotName.get() ); - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(successfulSnapshotName.get()) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -524,7 +524,7 @@ public void testSLMRetentionAfterRestore() throws Exception { }); logger.info("--> restoring index"); - RestoreSnapshotRequest restoreReq = new RestoreSnapshotRequest(REPO, snapshotName); + RestoreSnapshotRequest restoreReq = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, snapshotName); restoreReq.indices(indexName); restoreReq.renamePattern("(.+)"); restoreReq.renameReplacement("restored_$1"); @@ -542,7 +542,9 @@ public void testSLMRetentionAfterRestore() throws Exception { logger.info("--> waiting for {} snapshot to be deleted", snapshotName); assertBusy(() -> { try { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO).setSnapshots(snapshotName).get(); + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) + .setSnapshots(snapshotName) + .get(); assertThat(snapshotsStatusResponse.getSnapshots(), empty()); } catch (SnapshotMissingException e) { // This is what we want to happen @@ -552,7 +554,7 @@ public void testSLMRetentionAfterRestore() throws Exception { } private SnapshotsStatusResponse getSnapshotStatus(String snapshotName) { - return clusterAdmin().prepareSnapshotStatus(REPO).setSnapshots(snapshotName).get(); + return clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, REPO).setSnapshots(snapshotName).get(); } private void createAndPopulateIndex(String indexName) throws InterruptedException { diff --git 
a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleInitialisationTests.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleInitialisationTests.java index e5e71a38ce6b4..61f83319ef7ad 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleInitialisationTests.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleInitialisationTests.java @@ -61,7 +61,7 @@ protected Collection> getPlugins() { public void testSLMIsInRunningModeWhenILMIsDisabled() throws Exception { client().execute( TransportPutRepositoryAction.TYPE, - new PutRepositoryRequest().name("repo") + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name("repo") .type("fs") .settings(Settings.builder().put("repositories.fs.location", repositoryLocation).build()) ).get(10, TimeUnit.SECONDS); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index de24c1793d483..028633a480314 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -92,7 +92,7 @@ public static Optional maybeTakeSnapshot( Optional maybeMetadata = getSnapPolicyMetadata(jobId, clusterService.state()); String snapshotName = maybeMetadata.map(policyMetadata -> { // don't time out on this request to not produce failed SLM runs in case of a temporarily slow master node - CreateSnapshotRequest request = policyMetadata.getPolicy().toRequest().masterNodeTimeout(TimeValue.MAX_VALUE); + CreateSnapshotRequest request = policyMetadata.getPolicy().toRequest(TimeValue.MAX_VALUE); final LifecyclePolicySecurityClient clientWithHeaders = new LifecyclePolicySecurityClient( client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index 71afcb4548a06..fea84e1a032dd 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -309,8 +309,7 @@ void deleteSnapshot( // don't time out on this request to not produce failed SLM runs in case of a temporarily slow master node client.admin() .cluster() - .prepareDeleteSnapshot(repo, snapshot.getName()) - .setMasterNodeTimeout(TimeValue.MAX_VALUE) + .prepareDeleteSnapshot(TimeValue.MAX_VALUE, repo, snapshot.getName()) .execute(ActionListener.wrap(acknowledgedResponse -> { slmStats.snapshotDeleted(slmPolicy); listener.onResponse(acknowledgedResponse); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index 7deee466d7292..9e5265d91dc75 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -42,11 +42,13 @@ public void testToRequest() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - CreateSnapshotRequest request = p.toRequest(); - CreateSnapshotRequest 
expected = new CreateSnapshotRequest().userMetadata(Collections.singletonMap("policy", "id")); + CreateSnapshotRequest request = p.toRequest(TEST_REQUEST_TIMEOUT); + CreateSnapshotRequest expected = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT).userMetadata( + Collections.singletonMap("policy", "id") + ); p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 2099", "repo", null, null); - request = p.toRequest(); + request = p.toRequest(TEST_REQUEST_TIMEOUT); expected.waitForCompletion(true).snapshot(request.snapshot()).repository("repo"); assertEquals(expected, request); } diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index b9cde5d3a6b09..caae6dd393a0c 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -771,7 +771,7 @@ public void testRecoveryAfterRestoreUsesSnapshots() throws Exception { assertAcked(indicesAdmin().prepareDelete(indexName).get()); List<String> restoredIndexDataNodes = internalCluster().startDataOnlyNodes(2); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, "snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap") .setIndices(indexName) .setIndexSettings( Settings.builder() @@ -1541,7 +1541,9 @@ private Store.MetadataSnapshot getMetadataSnapshot(String nodeName, String index } private long getSnapshotSizeForIndex(String repository, String snapshot, String index) { - GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(repository).addSnapshots(snapshot).get(); + GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repository) + .addSnapshots(snapshot) + .get(); for (SnapshotInfo snapshotInfo : getSnapshotsResponse.getSnapshots()) { SnapshotInfo.IndexSnapshotDetails indexSnapshotDetails = snapshotInfo.indexSnapshotDetails().get(index); assertThat(indexSnapshotDetails, is(notNullValue())); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java index 45e63eb9ff31f..47b44f41f72d2 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java @@ -91,7 +91,10 @@ public void testRepositoryAnalysis() { } assertAcked( - clusterAdmin().preparePutRepository("test-repo").setVerify(false).setType(TestPlugin.ASSERTING_REPO_TYPE).setSettings(settings) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") + .setVerify(false) + .setType(TestPlugin.ASSERTING_REPO_TYPE) +
.setSettings(settings) ); final AssertingBlobStore blobStore = new AssertingBlobStore(settings.get(BASE_PATH_SETTING_KEY)); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml index b846dbe858f61..cc0e8aff9b239 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml @@ -76,7 +76,6 @@ aggregate_metric_double with ignore_malformed: index: index: test id: "1" - refresh: true body: metric: min: 18.2 @@ -88,11 +87,22 @@ aggregate_metric_double with ignore_malformed: value_count: 50 - do: - search: + index: + index: test + id: "2" + body: + metric: ["hey", {"value_count": 1, "min": 18.2,"max": 100}, [123, 456]] + + - do: + indices.refresh: {} + + - do: + get: index: test + id: "1" - match: - hits.hits.0._source: + _source: metric: min: 18.2 max: 100 @@ -102,3 +112,12 @@ aggregate_metric_double with ignore_malformed: field: "field" value_count: 50 + - do: + get: + index: test + id: "2" + + - match: + _source: + metric: [{"min": 18.2,"max": 100.0, "value_count": 1}, "hey", 123, 456] + diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml index b719502ae8f28..726b9d153025e 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/histogram.yml @@ -306,3 +306,54 @@ histogram with large count values: - match: { aggregations.percent.values.1\.0: 0.2 } - match: { aggregations.percent.values.5\.0: 0.2 } - match: { aggregations.percent.values.25\.0: 0.2 } + +--- +histogram with synthetic source and ignore_malformed: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: introduced in 8.15.0 + + - do: + indices.create: + index: histo_synthetic + body: + mappings: + _source: + mode: synthetic + properties: + latency: + type: histogram + ignore_malformed: true + + - do: + index: + index: histo_synthetic + id: "1" + body: + latency: "quick brown fox" + + - do: + index: + index: histo_synthetic + id: "2" + body: + latency: [{"values": [1.0], "counts": [1], "hello": "world"}, [123, 456], {"values": [2.0], "counts": [2]}, "fox"] + + - do: + indices.refresh: {} + + - do: + get: + index: histo_synthetic + id: 1 + - match: + _source: + latency: "quick brown fox" + + - do: + get: + index: histo_synthetic + id: 2 + - match: + _source: + latency: [{"values": [2.0], "counts": [2]}, {"values": [1.0], "counts": [1], "hello": "world"}, 123, 456, "fox"] diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/150_lookup.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/150_lookup.yml index 5f76954e57c89..96bfabf862f50 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/150_lookup.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/150_lookup.yml @@ -38,7 +38,7 @@ basic: - method: POST path: /_query parameters: [] - capabilities: [lookup_command, tables_types] + capabilities: [tables_types] reason: "uses LOOKUP" - do: @@ -66,7 +66,7 @@ read multivalue keyword: - method: POST path: /_query parameters: [] - capabilities: [lookup_command, 
tables_types] + capabilities: [tables_types] reason: "uses LOOKUP" - do: @@ -98,7 +98,7 @@ keyword matches text: - method: POST path: /_query parameters: [] - capabilities: [lookup_command, tables_types] + capabilities: [tables_types] reason: "uses LOOKUP" - do: @@ -144,7 +144,7 @@ duplicate keys: - method: POST path: /_query parameters: [] - capabilities: [lookup_command, tables_types] + capabilities: [tables_types] reason: "uses LOOKUP" - do: @@ -167,7 +167,7 @@ multivalued keys: - method: POST path: /_query parameters: [] - capabilities: [lookup_command, tables_types] + capabilities: [tables_types] reason: "uses LOOKUP" - do: @@ -209,7 +209,7 @@ on function: - method: POST path: /_query parameters: [] - capabilities: [lookup_command, tables_types] + capabilities: [tables_types] reason: "uses LOOKUP" - do: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml new file mode 100644 index 0000000000000..fc2e22d857358 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml @@ -0,0 +1,362 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + +--- +Filter single field: + - do: + indices.create: + index: index_fls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + secret: + type: keyword + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "secret":"squirrel"}' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read" ] + field_security: + grant: [ "name" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_fls + - match: { hits.total.value: 1 } + - match: { hits.total.relation: "eq" } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.secret: squirrel } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + - match: { hits.total.value: 1 } + - match: { hits.total.relation: "eq" } + - match: { hits.hits.0._source.name: A } + - is_false: "hits.hits.0._source.secret" + +--- +Filter fields in object: + - do: + indices.create: + index: index_fls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + object: + type: object + properties: + secret: + type: keyword + public: + type: keyword + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "object":{ "secret":"mission", "public":"interest" }}' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read", "monitor" ] + field_security: + grant: [ "*" ] + except: [ "object.secret" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... 
+ - do: + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.secret: mission } + - match: { hits.hits.0._source.object.public: interest } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.public: interest } + - is_false: "_source.object.secret" + + +--- +Fields under a disabled object - uses _ignored_source: + - do: + indices.create: + index: index_fls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + object: + type: object + enabled: false + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "object": [ { "secret":"mission1", "public":"interest1" }, { "secret":"mission2", "public":"interest2" } ] }' + - '{"create": { }}' + - '{"name": "B", "object": { "secret":"mission", "public":"interest" } }' + - '{"create": { }}' + - '{"name": "C", "object": { "foo":"bar", "public":"interest" } }' + - '{"create": { }}' + - '{"name": "D", "object": [10, 20, 30, 40] }' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read", "monitor" ] + field_security: + grant: [ "*" ] + except: [ "object.secret" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_fls + sort: name + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.0.secret: mission1 } + - match: { hits.hits.0._source.object.0.public: interest1 } + - match: { hits.hits.0._source.object.1.secret: mission2 } + - match: { hits.hits.0._source.object.1.public: interest2 } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.object.secret: mission } + - match: { hits.hits.1._source.object.public: interest } + - match: { hits.hits.2._source.name: C } + - match: { hits.hits.2._source.object.foo: bar } + - match: { hits.hits.2._source.object.public: interest } + - match: { hits.hits.3._source.name: D } + - match: { hits.hits.3._source.object: [ 10, 20, 30, 40] } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + sort: name + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.0.public: interest1 } + - match: { hits.hits.0._source.object.1.public: interest2 } + - is_false: "hits.hits.0._source.object.0.secret" + - is_false: "hits.hits.0._source.object.1.secret" + - match: { hits.hits.1._source.name: "B" } + - match: { hits.hits.1._source.object.public: interest } + - is_false: "hits.hits.1._source.object.secret" + - match: { hits.hits.2._source.name: C } + - match: { hits.hits.2._source.object.foo: bar } + - match: { hits.hits.2._source.object.public: interest } + - match: { hits.hits.3._source.name: D } + - match: { hits.hits.3._source.object: [ 10, 20, 30, 40 ] } + + +--- +Dynamic fields beyond limit - uses _ignored_source: + - do: + indices.create: + index: index_fls + body: + settings: + index: + mapping: + total_fields: + ignore_dynamic_beyond_limit: true + limit: 2 + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + object: + type: object + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", 
"object":{ "secret":"mission", "public":"interest" }}' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read", "monitor" ] + field_security: + grant: [ "*" ] + except: [ "object.secret" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.secret: mission } + - match: { hits.hits.0._source.object.public: interest } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.public: interest } + - is_false: "hits.hits.0._source.object.secret" + + +--- +Field with ignored_malformed: + - do: + indices.create: + index: index_fls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + secret: + type: integer + ignore_malformed: true + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "secret":"squirrel"}' + - '{"create": { }}' + - '{"name": "B", "secret": [ 10, "squirrel", 20] }' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read" ] + field_security: + grant: [ "name" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_fls + sort: name + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.secret: squirrel } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.secret: [ 10, 20, "squirrel"] } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - is_false: "hits.hits.0._source.secret" + - match: { hits.hits.1._source.name: B } + - is_false: "hits.hits.1._source.secret" diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java index 603b37d3e41f3..33d4d17367673 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java @@ -40,7 +40,7 @@ protected Settings nodeSettings() { @After public void cleanup() { - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); } protected void assertAsync( diff --git a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java index 2876fc50e036f..58e6e736b1207 100644 --- 
a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java +++ b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java @@ -186,7 +186,7 @@ public void testBasicSnapshotRestoreWorkFlow() { final String nonDedicatedVotingOnlyNode = internalCluster().startNode(dataContainingVotingOnlyNodeSettings); assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("verifyaccess-fs") .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); @@ -195,7 +195,11 @@ public void testBasicSnapshotRestoreWorkFlow() { createIndex("test-idx-1"); createIndex("test-idx-2"); createIndex("test-idx-3"); ensureGreen(); - VerifyRepositoryResponse verifyResponse = clusterAdmin().prepareVerifyRepository("test-repo").get(); + VerifyRepositoryResponse verifyResponse = clusterAdmin().prepareVerifyRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "test-repo" + ).get(); // only the da assertEquals(3, verifyResponse.getNodes().size()); assertTrue(verifyResponse.getNodes().stream().noneMatch(nw -> nw.getName().equals(dedicatedVotingOnlyNode))); @@ -207,7 +211,7 @@ public void testBasicSnapshotRestoreWorkFlow() { Client client = client(); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices(indicesToSnapshot) .get(); @@ -219,7 +223,7 @@ public void testBasicSnapshotRestoreWorkFlow() { List<SnapshotInfo> snapshotInfos = client.admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots(randomFrom("test-snap", "_all", "*", "*-snap", "test*")) .get() .getSnapshots(); @@ -234,7 +238,7 @@ public void testBasicSnapshotRestoreWorkFlow() { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index a332bcb599e90..c8c72855eaf7a 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -199,7 +199,10 @@ private void beforeRestart( } Request createRepo = new Request("PUT", "/_snapshot/" + repoName); createRepo.setJsonEntity( - Strings.toString(new PutRepositoryRequest().type(sourceOnlyRepository ? "source" : "fs").settings(repoSettingsBuilder.build())) + Strings.toString( + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).type(sourceOnlyRepository ?
"source" : "fs") + .settings(repoSettingsBuilder.build()) + ) ); assertAcknowledged(client().performRequest(createRepo)); @@ -279,7 +282,9 @@ private void restoreMountAndVerify( // restore index Request restoreRequest = new Request("POST", "/_snapshot/" + repoName + "/" + snapshotName + "/_restore"); restoreRequest.setJsonEntity( - Strings.toString(new RestoreSnapshotRequest().indices(indexName).renamePattern("(.+)").renameReplacement("restored_$1")) + Strings.toString( + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).indices(indexName).renamePattern("(.+)").renameReplacement("restored_$1") + ) ); restoreRequest.addParameter("wait_for_completion", "true"); Response restoreResponse = client().performRequest(restoreRequest);